1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019 HiSilicon Limited. */
4 #include <linux/acpi.h>
6 #include <linux/bitmap.h>
7 #include <linux/dma-mapping.h>
10 #include <linux/irqreturn.h>
11 #include <linux/log2.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/seq_file.h>
14 #include <linux/slab.h>
15 #include <linux/uacce.h>
16 #include <linux/uaccess.h>
17 #include <uapi/misc/uacce/hisi_qm.h>
18 #include <linux/hisi_acc_qm.h>
20 /* eq/aeq irq enable */
21 #define QM_VF_AEQ_INT_SOURCE 0x0
22 #define QM_VF_AEQ_INT_MASK 0x4
23 #define QM_VF_EQ_INT_SOURCE 0x8
24 #define QM_VF_EQ_INT_MASK 0xc
25 #define QM_IRQ_NUM_V1 1
26 #define QM_IRQ_NUM_PF_V2 4
27 #define QM_IRQ_NUM_VF_V2 2
28 #define QM_IRQ_NUM_VF_V3 3
30 #define QM_EQ_EVENT_IRQ_VECTOR 0
31 #define QM_AEQ_EVENT_IRQ_VECTOR 1
32 #define QM_CMD_EVENT_IRQ_VECTOR 2
33 #define QM_ABNORMAL_EVENT_IRQ_VECTOR 3
36 #define QM_MB_PING_ALL_VFS 0xffff
37 #define QM_MB_CMD_DATA_SHIFT 32
38 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0)
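/*
 * Editor's sketch (not part of the driver; helper name is hypothetical):
 * PF<->VF mailbox messages pack a command in the low 32 bits and optional
 * data in the high 32 bits, which is how qm_handle_vf_msg() below unpacks
 * them with QM_MB_CMD_DATA_MASK.
 */
static inline u64 example_mb_msg_pack(u32 cmd, u32 data)
{
	return (u64)cmd | ((u64)data << QM_MB_CMD_DATA_SHIFT);
}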
41 #define QM_SQ_HOP_NUM_SHIFT 0
42 #define QM_SQ_PAGE_SIZE_SHIFT 4
43 #define QM_SQ_BUF_SIZE_SHIFT 8
44 #define QM_SQ_SQE_SIZE_SHIFT 12
45 #define QM_SQ_PRIORITY_SHIFT 0
46 #define QM_SQ_ORDERS_SHIFT 4
47 #define QM_SQ_TYPE_SHIFT 8
48 #define QM_QC_PASID_ENABLE 0x1
49 #define QM_QC_PASID_ENABLE_SHIFT 7
51 #define QM_SQ_TYPE_MASK GENMASK(3, 0)
52 #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1)
55 #define QM_CQ_HOP_NUM_SHIFT 0
56 #define QM_CQ_PAGE_SIZE_SHIFT 4
57 #define QM_CQ_BUF_SIZE_SHIFT 8
58 #define QM_CQ_CQE_SIZE_SHIFT 12
59 #define QM_CQ_PHASE_SHIFT 0
60 #define QM_CQ_FLAG_SHIFT 1
62 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1)
63 #define QM_QC_CQE_SIZE 4
64 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1)
67 #define QM_EQE_AEQE_SIZE (2UL << 12)
68 #define QM_EQC_PHASE_SHIFT 16
70 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
71 #define QM_EQE_CQN_MASK GENMASK(15, 0)
73 #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
74 #define QM_AEQE_TYPE_SHIFT 17
75 #define QM_AEQE_CQN_MASK GENMASK(15, 0)
76 #define QM_CQ_OVERFLOW 0
77 #define QM_EQ_OVERFLOW 1
78 #define QM_CQE_ERROR 2
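/*
 * Editor's note: an AEQE's dw0 packs the source queue number in bits [15:0]
 * (QM_AEQE_CQN_MASK), the phase bit at bit 16, and the event type (the
 * QM_CQ_OVERFLOW/QM_EQ_OVERFLOW/QM_CQE_ERROR values above) starting at
 * bit 17 (QM_AEQE_TYPE_SHIFT); see qm_aeq_thread() below.
 */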
80 #define QM_DOORBELL_CMD_SQ 0
81 #define QM_DOORBELL_CMD_CQ 1
82 #define QM_DOORBELL_CMD_EQ 2
83 #define QM_DOORBELL_CMD_AEQ 3
85 #define QM_DOORBELL_BASE_V1 0x340
86 #define QM_DB_CMD_SHIFT_V1 16
87 #define QM_DB_INDEX_SHIFT_V1 32
88 #define QM_DB_PRIORITY_SHIFT_V1 48
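/*
 * Editor's note: a V1 doorbell is one 64-bit word built from the shifts
 * above, exactly as qm_db_v1() below composes it:
 *   bits [15:0]  queue number
 *   bits [31:16] doorbell command (QM_DOORBELL_CMD_*)
 *   bits [47:32] queue index (sq tail or cq head)
 *   bits [63:48] priority
 */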
89 #define QM_QUE_ISO_CFG_V 0x0030
90 #define QM_PAGE_SIZE 0x0034
91 #define QM_QUE_ISO_EN 0x100154
92 #define QM_CAPBILITY 0x100158
93 #define QM_QP_NUN_MASK GENMASK(10, 0)
94 #define QM_QP_DB_INTERVAL 0x10000
96 #define QM_MEM_START_INIT 0x100040
97 #define QM_MEM_INIT_DONE 0x100044
98 #define QM_VFT_CFG_RDY 0x10006c
99 #define QM_VFT_CFG_OP_WR 0x100058
100 #define QM_VFT_CFG_TYPE 0x10005c
101 #define QM_SQC_VFT 0x0
102 #define QM_CQC_VFT 0x1
103 #define QM_VFT_CFG 0x100060
104 #define QM_VFT_CFG_OP_ENABLE 0x100054
105 #define QM_PM_CTRL 0x100148
106 #define QM_IDLE_DISABLE BIT(9)
108 #define QM_VFT_CFG_DATA_L 0x100064
109 #define QM_VFT_CFG_DATA_H 0x100068
110 #define QM_SQC_VFT_BUF_SIZE (7ULL << 8)
111 #define QM_SQC_VFT_SQC_SIZE (5ULL << 12)
112 #define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16)
113 #define QM_SQC_VFT_START_SQN_SHIFT 28
114 #define QM_SQC_VFT_VALID (1ULL << 44)
115 #define QM_SQC_VFT_SQN_SHIFT 45
116 #define QM_CQC_VFT_BUF_SIZE (7ULL << 8)
117 #define QM_CQC_VFT_SQC_SIZE (5ULL << 12)
118 #define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16)
119 #define QM_CQC_VFT_VALID (1ULL << 28)
121 #define QM_SQC_VFT_BASE_SHIFT_V2 28
122 #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0)
123 #define QM_SQC_VFT_NUM_SHIFT_V2 45
124 #define QM_SQC_VFT_NUM_MASK_v2 GENMASK(9, 0)
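/*
 * Editor's sketch (hypothetical helper, mirroring qm_get_vft_v2() below):
 * decoding the base queue and queue count from a V2 SQC VFT entry.
 */
static inline void example_decode_sqc_vft_v2(u64 sqc_vft, u32 *base, u32 *number)
{
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	*number = (QM_SQC_VFT_NUM_MASK_v2 &
		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
}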
126 #define QM_DFX_CNT_CLR_CE 0x100118
128 #define QM_ABNORMAL_INT_SOURCE 0x100000
129 #define QM_ABNORMAL_INT_SOURCE_CLR GENMASK(14, 0)
130 #define QM_ABNORMAL_INT_MASK 0x100004
131 #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff
132 #define QM_ABNORMAL_INT_STATUS 0x100008
133 #define QM_ABNORMAL_INT_SET 0x10000c
134 #define QM_ABNORMAL_INF00 0x100010
135 #define QM_FIFO_OVERFLOW_TYPE 0xc0
136 #define QM_FIFO_OVERFLOW_TYPE_SHIFT 6
137 #define QM_FIFO_OVERFLOW_VF 0x3f
138 #define QM_ABNORMAL_INF01 0x100014
139 #define QM_DB_TIMEOUT_TYPE 0xc0
140 #define QM_DB_TIMEOUT_TYPE_SHIFT 6
141 #define QM_DB_TIMEOUT_VF 0x3f
142 #define QM_RAS_CE_ENABLE 0x1000ec
143 #define QM_RAS_FE_ENABLE 0x1000f0
144 #define QM_RAS_NFE_ENABLE 0x1000f4
145 #define QM_RAS_CE_THRESHOLD 0x1000f8
146 #define QM_RAS_CE_TIMES_PER_IRQ 1
147 #define QM_RAS_MSI_INT_SEL 0x1040f4
148 #define QM_OOO_SHUTDOWN_SEL 0x1040f8
150 #define QM_RESET_WAIT_TIMEOUT 400
151 #define QM_PEH_VENDOR_ID 0x1000d8
152 #define ACC_VENDOR_ID_VALUE 0x5a5a
153 #define QM_PEH_DFX_INFO0 0x1000fc
154 #define QM_PEH_DFX_INFO1 0x100100
155 #define QM_PEH_DFX_MASK (BIT(0) | BIT(2))
156 #define QM_PEH_MSI_FINISH_MASK GENMASK(19, 16)
157 #define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3
158 #define ACC_PEH_MSI_DISABLE GENMASK(31, 0)
159 #define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1
160 #define ACC_MASTER_TRANS_RETURN_RW 3
161 #define ACC_MASTER_TRANS_RETURN 0x300150
162 #define ACC_MASTER_GLOBAL_CTRL 0x300000
163 #define ACC_AM_CFG_PORT_WR_EN 0x30001c
164 #define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
165 #define ACC_AM_ROB_ECC_INT_STS 0x300104
166 #define ACC_ROB_ECC_ERR_MULTPL BIT(1)
167 #define QM_MSI_CAP_ENABLE BIT(16)
169 /* interfunction communication */
170 #define QM_IFC_READY_STATUS 0x100128
171 #define QM_IFC_C_STS_M 0x10012C
172 #define QM_IFC_INT_SET_P 0x100130
173 #define QM_IFC_INT_CFG 0x100134
174 #define QM_IFC_INT_SOURCE_P 0x100138
175 #define QM_IFC_INT_SOURCE_V 0x0020
176 #define QM_IFC_INT_MASK 0x0024
177 #define QM_IFC_INT_STATUS 0x0028
178 #define QM_IFC_INT_SET_V 0x002C
179 #define QM_IFC_SEND_ALL_VFS GENMASK(6, 0)
180 #define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0)
181 #define QM_IFC_INT_SOURCE_MASK BIT(0)
182 #define QM_IFC_INT_DISABLE BIT(0)
183 #define QM_IFC_INT_STATUS_MASK BIT(0)
184 #define QM_IFC_INT_SET_MASK BIT(0)
185 #define QM_WAIT_DST_ACK 10
186 #define QM_MAX_PF_WAIT_COUNT 10
187 #define QM_MAX_VF_WAIT_COUNT 40
188 #define QM_VF_RESET_WAIT_US 20000
189 #define QM_VF_RESET_WAIT_CNT 3000
190 #define QM_VF_RESET_WAIT_TIMEOUT_US \
191 (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT)
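/*
 * Editor's note: 20000 us * 3000 = 60,000,000 us, i.e. a 60 second total
 * budget for waiting on a VF reset.
 */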
193 #define QM_DFX_MB_CNT_VF 0x104010
194 #define QM_DFX_DB_CNT_VF 0x104020
195 #define QM_DFX_SQE_CNT_VF_SQN 0x104030
196 #define QM_DFX_CQE_CNT_VF_CQN 0x104040
197 #define QM_DFX_QN_SHIFT 16
198 #define CURRENT_FUN_MASK GENMASK(5, 0)
199 #define CURRENT_Q_MASK GENMASK(31, 16)
201 #define POLL_PERIOD 10
202 #define POLL_TIMEOUT 1000
203 #define WAIT_PERIOD_US_MAX 200
204 #define WAIT_PERIOD_US_MIN 100
205 #define MAX_WAIT_COUNTS 1000
206 #define QM_CACHE_WB_START 0x204
207 #define QM_CACHE_WB_DONE 0x208
211 #define QM_SQE_DATA_ALIGN_MASK GENMASK(6, 0)
212 #define QMC_ALIGN(sz) ALIGN(sz, 32)
214 #define QM_DBG_READ_LEN 256
215 #define QM_DBG_WRITE_LEN 1024
216 #define QM_DBG_TMP_BUF_LEN 22
217 #define QM_PCI_COMMAND_INVALID ~0
218 #define QM_RESET_STOP_TX_OFFSET 1
219 #define QM_RESET_STOP_RX_OFFSET 2
221 #define WAIT_PERIOD 20
222 #define REMOVE_WAIT_DELAY 10
223 #define QM_SQE_ADDR_MASK GENMASK(7, 0)
224 #define QM_EQ_DEPTH (1024 * 2)
226 #define QM_DRIVER_REMOVING 0
227 #define QM_RST_SCHED 1
228 #define QM_RESETTING 2
229 #define QM_QOS_PARAM_NUM 2
230 #define QM_QOS_VAL_NUM 1
231 #define QM_QOS_BDF_PARAM_NUM 4
232 #define QM_QOS_MAX_VAL 1000
233 #define QM_QOS_RATE 100
234 #define QM_QOS_EXPAND_RATE 1000
235 #define QM_SHAPER_CIR_B_MASK GENMASK(7, 0)
236 #define QM_SHAPER_CIR_U_MASK GENMASK(10, 8)
237 #define QM_SHAPER_CIR_S_MASK GENMASK(14, 11)
238 #define QM_SHAPER_FACTOR_CIR_U_SHIFT 8
239 #define QM_SHAPER_FACTOR_CIR_S_SHIFT 11
240 #define QM_SHAPER_FACTOR_CBS_B_SHIFT 15
241 #define QM_SHAPER_FACTOR_CBS_S_SHIFT 19
242 #define QM_SHAPER_CBS_B 1
243 #define QM_SHAPER_CBS_S 16
244 #define QM_SHAPER_VFT_OFFSET 6
245 #define WAIT_FOR_QOS_VF 100
246 #define QM_QOS_MIN_ERROR_RATE 5
247 #define QM_QOS_TYPICAL_NUM 8
248 #define QM_SHAPER_MIN_CBS_S 8
249 #define QM_QOS_TICK 0x300U
250 #define QM_QOS_DIVISOR_CLK 0x1f40U
251 #define QM_QOS_MAX_CIR_B 200
252 #define QM_QOS_MIN_CIR_B 100
253 #define QM_QOS_MAX_CIR_U 6
254 #define QM_QOS_MAX_CIR_S 11
255 #define QM_QOS_VAL_MAX_LEN 32
256 #define QM_DFX_BASE 0x0100000
257 #define QM_DFX_STATE1 0x0104000
258 #define QM_DFX_STATE2 0x01040C8
259 #define QM_DFX_COMMON 0x0000
260 #define QM_DFX_BASE_LEN 0x5A
261 #define QM_DFX_STATE1_LEN 0x2E
262 #define QM_DFX_STATE2_LEN 0x11
263 #define QM_DFX_COMMON_LEN 0xC3
264 #define QM_DFX_REGS_LEN 4UL
265 #define QM_AUTOSUSPEND_DELAY 3000
267 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
268 (((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \
269 ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \
270 ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \
271 ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
273 #define QM_MK_CQC_DW3_V2(cqe_sz) \
274 ((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))
276 #define QM_MK_SQC_W13(priority, orders, alg_type) \
277 (((priority) << QM_SQ_PRIORITY_SHIFT) | \
278 ((orders) << QM_SQ_ORDERS_SHIFT) | \
279 (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))
281 #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
282 (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \
283 ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \
284 ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \
285 ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
287 #define QM_MK_SQC_DW3_V2(sqe_sz) \
288 ((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
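/*
 * Editor's sketch (hypothetical helper names and field values): how the
 * QM_MK_* macros above get combined when an SQC is filled elsewhere in
 * this file.
 */
static inline u32 example_sqc_w13(void)
{
	/* priority 0, orders field 1, alg type 1 - all illustrative */
	return QM_MK_SQC_W13(0, 1, 1);
}

static inline u32 example_sqc_dw3_v2(void)
{
	/* 128-byte SQEs; ilog2(128) = 7 goes at QM_SQ_SQE_SIZE_SHIFT */
	return QM_MK_SQC_DW3_V2(128);
}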
290 #define INIT_QC_COMMON(qc, base, pasid) do { \
293 (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \
294 (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \
298 (qc)->pasid = cpu_to_le16(pasid); \
309 enum acc_err_result {
321 QM_PF_FLR_PREPARE = 0x01,
414 struct hisi_qm_resource {
417 struct list_head list;
420 struct hisi_qm_hw_ops {
421 int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
422 void (*qm_db)(struct hisi_qm *qm, u16 qn,
423 u8 cmd, u16 index, u8 priority);
424 u32 (*get_irq_num)(struct hisi_qm *qm);
425 int (*debug_init)(struct hisi_qm *qm);
426 void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
427 void (*hw_error_uninit)(struct hisi_qm *qm);
428 enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
429 int (*stop_qp)(struct hisi_qp *qp);
430 int (*set_msi)(struct hisi_qm *qm, bool set);
431 int (*ping_all_vfs)(struct hisi_qm *qm, u64 cmd);
432 int (*ping_pf)(struct hisi_qm *qm, u64 cmd);
440 static struct qm_dfx_item qm_dfx_files[] = {
441 {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
442 {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
443 {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
444 {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
445 {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
448 static const char * const qm_debug_file_name[] = {
449 [CURRENT_QM] = "current_qm",
450 [CURRENT_Q] = "current_q",
451 [CLEAR_ENABLE] = "clear_enable",
454 struct hisi_qm_hw_error {
459 static const struct hisi_qm_hw_error qm_hw_error[] = {
460 { .int_msk = BIT(0), .msg = "qm_axi_rresp" },
461 { .int_msk = BIT(1), .msg = "qm_axi_bresp" },
462 { .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
463 { .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
464 { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
465 { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
466 { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
467 { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
468 { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
469 { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
470 { .int_msk = BIT(10), .msg = "qm_db_timeout" },
471 { .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
472 { .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
473 { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" },
474 { .int_msk = BIT(14), .msg = "qm_flr_timeout" },
478 /* define the QM's dfx regs region and region length */
479 static struct dfx_diff_registers qm_diff_regs[] = {
481 .reg_offset = QM_DFX_BASE,
482 .reg_len = QM_DFX_BASE_LEN,
484 .reg_offset = QM_DFX_STATE1,
485 .reg_len = QM_DFX_STATE1_LEN,
487 .reg_offset = QM_DFX_STATE2,
488 .reg_len = QM_DFX_STATE2_LEN,
490 .reg_offset = QM_DFX_COMMON,
491 .reg_len = QM_DFX_COMMON_LEN,
495 static const char * const qm_db_timeout[] = {
496 "sq", "cq", "eq", "aeq",
499 static const char * const qm_fifo_overflow[] = {
503 static const char * const qm_s[] = {
504 "init", "start", "close", "stop",
507 static const char * const qp_s[] = {
508 "none", "init", "start", "stop", "close",
511 struct qm_typical_qos_table {
517 /* the qos step is 100 */
518 static struct qm_typical_qos_table shaper_cir_s[] = {
526 static struct qm_typical_qos_table shaper_cbs_s[] = {
536 static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
538 enum qm_state curr = atomic_read(&qm->status.flags);
543 if (new == QM_START || new == QM_CLOSE)
551 if (new == QM_CLOSE || new == QM_START)
558 dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
559 qm_s[curr], qm_s[new]);
562 dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
563 qm_s[curr], qm_s[new]);
568 static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
571 enum qm_state qm_curr = atomic_read(&qm->status.flags);
572 enum qp_state qp_curr = 0;
576 qp_curr = atomic_read(&qp->qp_status.flags);
580 if (qm_curr == QM_START || qm_curr == QM_INIT)
584 if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
585 (qm_curr == QM_START && qp_curr == QP_STOP))
589 if ((qm_curr == QM_START && qp_curr == QP_START) ||
590 (qp_curr == QP_INIT))
594 if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
595 (qm_curr == QM_START && qp_curr == QP_STOP) ||
596 (qm_curr == QM_STOP && qp_curr == QP_STOP) ||
597 (qm_curr == QM_STOP && qp_curr == QP_INIT))
604 dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
605 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
608 dev_warn(&qm->pdev->dev,
609 "Can not change qp state from %s to %s in QM %s\n",
610 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);
615 static u32 qm_get_hw_error_status(struct hisi_qm *qm)
617 return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
620 static u32 qm_get_dev_err_status(struct hisi_qm *qm)
622 return qm->err_ini->get_dev_hw_err_status(qm);
625 /* Check if the error causes the master OOO (out-of-order) module to block */
626 static int qm_check_dev_error(struct hisi_qm *qm)
630 if (qm->fun_type == QM_HW_VF)
633 val = qm_get_hw_error_status(qm);
634 dev_val = qm_get_dev_err_status(qm);
636 if (qm->ver < QM_HW_V3)
637 return (val & QM_ECC_MBIT) ||
638 (dev_val & qm->err_info.ecc_2bits_mask);
640 return (val & readl(qm->io_base + QM_OOO_SHUTDOWN_SEL)) ||
641 (dev_val & (~qm->err_info.dev_ce_mask));
644 static int qm_wait_reset_finish(struct hisi_qm *qm)
648 /* All reset requests need to be queued for processing */
649 while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
651 if (delay > QM_RESET_WAIT_TIMEOUT)
658 static int qm_reset_prepare_ready(struct hisi_qm *qm)
660 struct pci_dev *pdev = qm->pdev;
661 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
664 * PF and VF on host do not support resetting at the
665 * same time on Kunpeng920.
667 if (qm->ver < QM_HW_V3)
668 return qm_wait_reset_finish(pf_qm);
670 return qm_wait_reset_finish(qm);
673 static void qm_reset_bit_clear(struct hisi_qm *qm)
675 struct pci_dev *pdev = qm->pdev;
676 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
678 if (qm->ver < QM_HW_V3)
679 clear_bit(QM_RESETTING, &pf_qm->misc_ctl);
681 clear_bit(QM_RESETTING, &qm->misc_ctl);
684 static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd,
685 u64 base, u16 queue, bool op)
687 mailbox->w0 = cpu_to_le16((cmd) |
688 ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) |
689 (0x1 << QM_MB_BUSY_SHIFT));
690 mailbox->queue_num = cpu_to_le16(queue);
691 mailbox->base_l = cpu_to_le32(lower_32_bits(base));
692 mailbox->base_h = cpu_to_le32(upper_32_bits(base));
696 /* Return 0 if the mailbox is ready, or -ETIMEDOUT on hardware timeout */
697 int hisi_qm_wait_mb_ready(struct hisi_qm *qm)
701 return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
702 val, !((val >> QM_MB_BUSY_SHIFT) &
703 0x1), POLL_PERIOD, POLL_TIMEOUT);
705 EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready);
707 /* 128 bits must be written to the hardware at one time to trigger a mailbox */
708 static void qm_mb_write(struct hisi_qm *qm, const void *src)
710 void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
711 unsigned long tmp0 = 0, tmp1 = 0;
713 if (!IS_ENABLED(CONFIG_ARM64)) {
714 memcpy_toio(fun_base, src, 16);
719 asm volatile("ldp %0, %1, %3\n"
720 "stp %0, %1, %2\n"
721 "dmb oshst\n"
722 : "=&r" (tmp0),
723 "=&r" (tmp1),
724 "+Q" (*((char __iomem *)fun_base))
725 : "Q" (*((char *)src))
726 : "memory");
729 static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox)
731 if (unlikely(hisi_qm_wait_mb_ready(qm))) {
732 dev_err(&qm->pdev->dev, "QM mailbox is busy, cannot start a new operation!\n");
736 qm_mb_write(qm, mailbox);
738 if (unlikely(hisi_qm_wait_mb_ready(qm))) {
739 dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
746 atomic64_inc(&qm->debug.dfx.mb_err_cnt);
750 int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
753 struct qm_mailbox mailbox;
756 dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
757 queue, cmd, (unsigned long long)dma_addr);
759 qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op);
761 mutex_lock(&qm->mailbox_lock);
762 ret = qm_mb_nolock(qm, &mailbox);
763 mutex_unlock(&qm->mailbox_lock);
767 EXPORT_SYMBOL_GPL(hisi_qm_mb);
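/*
 * Editor's usage sketch for hisi_qm_mb() (hypothetical wrapper): reading a
 * hardware SQC into a DMA buffer, exactly as qm_dump_sqc_raw() does later
 * in this file; op = 1 selects a read from hardware.
 */
static inline int example_read_sqc(struct hisi_qm *qm, dma_addr_t sqc_dma,
				   u16 qp_id)
{
	return hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1);
}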
769 static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
773 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
774 ((u64)index << QM_DB_INDEX_SHIFT_V1) |
775 ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);
777 writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
780 static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
782 void __iomem *io_base = qm->io_base;
786 if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
787 io_base = qm->db_io_base + (u64)qn * qm->db_interval +
788 QM_DOORBELL_SQ_CQ_BASE_V2;
790 io_base += QM_DOORBELL_EQ_AEQ_BASE_V2;
792 doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
793 ((u64)randata << QM_DB_RAND_SHIFT_V2) |
794 ((u64)index << QM_DB_INDEX_SHIFT_V2) |
795 ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);
797 writeq(doorbell, io_base);
800 static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
802 dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
805 qm->ops->qm_db(qm, qn, cmd, index, priority);
808 static void qm_disable_clock_gate(struct hisi_qm *qm)
812 /* If the QM enables clock gating on Kunpeng930, QoS will be inaccurate. */
813 if (qm->ver < QM_HW_V3)
816 val = readl(qm->io_base + QM_PM_CTRL);
817 val |= QM_IDLE_DISABLE;
818 writel(val, qm->io_base + QM_PM_CTRL);
821 static int qm_dev_mem_reset(struct hisi_qm *qm)
825 writel(0x1, qm->io_base + QM_MEM_START_INIT);
826 return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
827 val & BIT(0), POLL_PERIOD,
828 POLL_TIMEOUT);
831 static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
833 return QM_IRQ_NUM_V1;
836 static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
838 if (qm->fun_type == QM_HW_PF)
839 return QM_IRQ_NUM_PF_V2;
841 return QM_IRQ_NUM_VF_V2;
844 static u32 qm_get_irq_num_v3(struct hisi_qm *qm)
846 if (qm->fun_type == QM_HW_PF)
847 return QM_IRQ_NUM_PF_V2;
849 return QM_IRQ_NUM_VF_V3;
852 static int qm_pm_get_sync(struct hisi_qm *qm)
854 struct device *dev = &qm->pdev->dev;
857 if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
860 ret = pm_runtime_resume_and_get(dev);
862 dev_err(dev, "failed to get_sync(%d).\n", ret);
869 static void qm_pm_put_sync(struct hisi_qm *qm)
871 struct device *dev = &qm->pdev->dev;
873 if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
876 pm_runtime_mark_last_busy(dev);
877 pm_runtime_put_autosuspend(dev);
880 static void qm_cq_head_update(struct hisi_qp *qp)
882 if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
883 qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
884 qp->qp_status.cq_head = 0;
886 qp->qp_status.cq_head++;
890 static void qm_poll_req_cb(struct hisi_qp *qp)
892 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
893 struct hisi_qm *qm = qp->qm;
895 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
897 qp->req_cb(qp, qp->sqe + qm->sqe_size *
898 le16_to_cpu(cqe->sq_head));
899 qm_cq_head_update(qp);
900 cqe = qp->cqe + qp->qp_status.cq_head;
901 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
902 qp->qp_status.cq_head, 0);
903 atomic_dec(&qp->qp_status.used);
907 qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1);
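/*
 * Editor's note: the final doorbell above uses priority 1; as far as can be
 * told from this file, that sets the c_flag and re-arms the completion
 * interrupt for this CQ, whereas the in-loop doorbells pass 0.
 */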
910 static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data)
912 struct hisi_qm *qm = poll_data->qm;
913 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
917 while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
918 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
919 poll_data->qp_finish_id[eqe_num] = cqn;
922 if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
923 qm->status.eqc_phase = !qm->status.eqc_phase;
925 qm->status.eq_head = 0;
928 qm->status.eq_head++;
931 if (eqe_num == (QM_EQ_DEPTH >> 1) - 1)
935 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
940 static void qm_work_process(struct work_struct *work)
942 struct hisi_qm_poll_data *poll_data =
943 container_of(work, struct hisi_qm_poll_data, work);
944 struct hisi_qm *qm = poll_data->qm;
948 /* Get qp id of completed tasks and re-enable the interrupt. */
949 eqe_num = qm_get_complete_eqe_num(poll_data);
950 for (i = eqe_num - 1; i >= 0; i--) {
951 qp = &qm->qp_array[poll_data->qp_finish_id[i]];
952 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP))
960 if (likely(qp->req_cb))
965 static bool do_qm_irq(struct hisi_qm *qm)
967 struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
968 struct hisi_qm_poll_data *poll_data;
971 if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
974 if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
975 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
976 poll_data = &qm->poll_data[cqn];
977 queue_work(qm->wq, &poll_data->work);
985 static irqreturn_t qm_irq(int irq, void *data)
987 struct hisi_qm *qm = data;
994 atomic64_inc(&qm->debug.dfx.err_irq_cnt);
995 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
1000 static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
1002 struct hisi_qm *qm = data;
1005 val = readl(qm->io_base + QM_IFC_INT_STATUS);
1006 val &= QM_IFC_INT_STATUS_MASK;
1010 schedule_work(&qm->cmd_process);
1015 static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
1019 if (qp->is_in_kernel)
1022 addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
1025 /* make sure setup is completed */
1029 static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id)
1031 struct hisi_qp *qp = &qm->qp_array[qp_id];
1033 qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET);
1034 hisi_qm_stop_qp(qp);
1035 qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET);
1038 static void qm_reset_function(struct hisi_qm *qm)
1040 struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev));
1041 struct device *dev = &qm->pdev->dev;
1044 if (qm_check_dev_error(pf_qm))
1047 ret = qm_reset_prepare_ready(qm);
1049 dev_err(dev, "reset function not ready\n");
1053 ret = hisi_qm_stop(qm, QM_FLR);
1055 dev_err(dev, "failed to stop qm when reset function\n");
1059 ret = hisi_qm_start(qm);
1061 dev_err(dev, "failed to start qm when reset function\n");
1064 qm_reset_bit_clear(qm);
1067 static irqreturn_t qm_aeq_thread(int irq, void *data)
1069 struct hisi_qm *qm = data;
1070 struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
1073 while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
1074 type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
1075 qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK;
1078 case QM_EQ_OVERFLOW:
1079 dev_err(&qm->pdev->dev, "eq overflow, reset function\n");
1080 qm_reset_function(qm);
1082 case QM_CQ_OVERFLOW:
1083 dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n",
1087 qm_disable_qp(qm, qp_id);
1090 dev_err(&qm->pdev->dev, "unknown error type %u\n",
1095 if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
1096 qm->status.aeqc_phase = !qm->status.aeqc_phase;
1098 qm->status.aeq_head = 0;
1101 qm->status.aeq_head++;
1105 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
1110 static irqreturn_t qm_aeq_irq(int irq, void *data)
1112 struct hisi_qm *qm = data;
1114 atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
1115 if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
1118 return IRQ_WAKE_THREAD;
1121 static void qm_irq_unregister(struct hisi_qm *qm)
1123 struct pci_dev *pdev = qm->pdev;
1125 free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
1127 if (qm->ver > QM_HW_V1) {
1128 free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
1130 if (qm->fun_type == QM_HW_PF)
1131 free_irq(pci_irq_vector(pdev,
1132 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
1135 if (qm->ver > QM_HW_V2)
1136 free_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR), qm);
1139 static void qm_init_qp_status(struct hisi_qp *qp)
1141 struct hisi_qp_status *qp_status = &qp->qp_status;
1143 qp_status->sq_tail = 0;
1144 qp_status->cq_head = 0;
1145 qp_status->cqc_phase = true;
1146 atomic_set(&qp_status->used, 0);
1149 static void qm_init_prefetch(struct hisi_qm *qm)
1151 struct device *dev = &qm->pdev->dev;
1152 u32 page_type = 0x0;
1154 if (qm->ver < QM_HW_V3)
1157 switch (PAGE_SIZE) {
1168 dev_err(dev, "system page size is not supported: %lu, defaulting to 4KB",
1172 writel(page_type, qm->io_base + QM_PAGE_SIZE);
1176 * acc_shaper_para_calc() Gets the IR value by the QoS formula; the return value
1177 * is the expected QoS calculated.
1179 * IR = X Mbps: ir = 1 means IR = 100 Mbps, ir = 10000 means IR = 10 Gbps
1181 * IR_b * (2 ^ IR_u) * 8000
1182 * IR(Mbps) = -------------------------
1183 * Tick * (2 ^ IR_s)
1184 */
1185 static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s)
1187 return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) /
1188 (QM_QOS_TICK * (1 << cir_s));
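/*
 * Editor's worked example (hypothetical inputs): cir_b = 100, cir_u = 0,
 * cir_s = 3 gives (100 * 8000 * 1) / (768 * 8) = 800000 / 6144 = 130 Mbps,
 * using QM_QOS_DIVISOR_CLK = 0x1f40 (8000) and QM_QOS_TICK = 0x300 (768).
 */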
1191 static u32 acc_shaper_calc_cbs_s(u32 ir)
1193 int table_size = ARRAY_SIZE(shaper_cbs_s);
1196 for (i = 0; i < table_size; i++) {
1197 if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end)
1198 return shaper_cbs_s[i].val;
1201 return QM_SHAPER_MIN_CBS_S;
1204 static u32 acc_shaper_calc_cir_s(u32 ir)
1206 int table_size = ARRAY_SIZE(shaper_cir_s);
1209 for (i = 0; i < table_size; i++) {
1210 if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end)
1211 return shaper_cir_s[i].val;
1217 static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor)
1219 u32 cir_b, cir_u, cir_s, ir_calc;
1222 factor->cbs_s = acc_shaper_calc_cbs_s(ir);
1223 cir_s = acc_shaper_calc_cir_s(ir);
1225 for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) {
1226 for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) {
1227 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
1229 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
1230 if (error_rate <= QM_QOS_MIN_ERROR_RATE) {
1231 factor->cir_b = cir_b;
1232 factor->cir_u = cir_u;
1233 factor->cir_s = cir_s;
1242 static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
1243 u32 number, struct qm_shaper_factor *factor)
1250 if (qm->ver == QM_HW_V1) {
1251 tmp = QM_SQC_VFT_BUF_SIZE |
1252 QM_SQC_VFT_SQC_SIZE |
1253 QM_SQC_VFT_INDEX_NUMBER |
1255 (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
1257 tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
1259 (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
1263 if (qm->ver == QM_HW_V1) {
1264 tmp = QM_CQC_VFT_BUF_SIZE |
1265 QM_CQC_VFT_SQC_SIZE |
1266 QM_CQC_VFT_INDEX_NUMBER |
1269 tmp = QM_CQC_VFT_VALID;
1273 if (qm->ver >= QM_HW_V3) {
1274 tmp = factor->cir_b |
1275 (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) |
1276 (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) |
1277 (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) |
1278 (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT);
1284 writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
1285 writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
1288 static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
1289 u32 fun_num, u32 base, u32 number)
1291 struct qm_shaper_factor *factor = &qm->factor[fun_num];
1295 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1296 val & BIT(0), POLL_PERIOD,
1297 POLL_TIMEOUT);
1298 if (ret)
1299 return ret;
1301 writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
1302 writel(type, qm->io_base + QM_VFT_CFG_TYPE);
1303 if (type == SHAPER_VFT)
1304 fun_num |= base << QM_SHAPER_VFT_OFFSET;
1306 writel(fun_num, qm->io_base + QM_VFT_CFG);
1308 qm_vft_data_cfg(qm, type, base, number, factor);
1310 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
1311 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
1313 return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
1314 val & BIT(0), POLL_PERIOD,
1315 POLL_TIMEOUT);
1318 static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num)
1320 u32 qos = qm->factor[fun_num].func_qos;
1323 ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]);
1325 dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n");
1328 writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG);
1329 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
1330 /* The base number of queues is reused for the different alg types */
1331 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1);
1339 /* The config should be conducted after qm_dev_mem_reset() */
1340 static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
1345 for (i = SQC_VFT; i <= CQC_VFT; i++) {
1346 ret = qm_set_vft_common(qm, i, fun_num, base, number);
1351 /* init default shaper qos val */
1352 if (qm->ver >= QM_HW_V3) {
1353 ret = qm_shaper_init_vft(qm, fun_num);
1360 for (i = SQC_VFT; i <= CQC_VFT; i++) {
1361 ret = qm_set_vft_common(qm, i, fun_num, 0, 0);
1368 static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
1373 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
1377 sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
1378 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
1379 *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
1380 *number = (QM_SQC_VFT_NUM_MASK_v2 &
1381 (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;
1386 static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num)
1388 u32 remain_q_num, vfq_num;
1389 u32 num_vfs = qm->vfs_num;
1391 vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs;
1392 if (vfq_num >= qm->max_qp_num)
1393 return qm->max_qp_num;
1395 remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs;
1396 if (vfq_num + remain_q_num <= qm->max_qp_num)
1397 return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num;
1400 * if vfq_num + remain_q_num > max_qp_num, the last remain_q_num VFs
1401 * each get one more queue.
1403 return fun_num + remain_q_num > num_vfs ? vfq_num + 1 : vfq_num;
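/*
 * Editor's worked example (hypothetical numbers): with 50 spare queues and
 * num_vfs = 4, vfq_num = 12 and remain_q_num = 2. If 14 <= max_qp_num, VFs
 * 1-3 get 12 queues and VF 4 gets 14; otherwise VFs 3 and 4 get 13 each so
 * that no single VF exceeds max_qp_num.
 */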
1406 static struct hisi_qm *file_to_qm(struct debugfs_file *file)
1408 struct qm_debug *debug = file->debug;
1410 return container_of(debug, struct hisi_qm, debug);
1413 static u32 current_q_read(struct hisi_qm *qm)
1415 return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
1418 static int current_q_write(struct hisi_qm *qm, u32 val)
1422 if (val >= qm->debug.curr_qm_qp_num)
1425 tmp = val << QM_DFX_QN_SHIFT |
1426 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
1427 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
1429 tmp = val << QM_DFX_QN_SHIFT |
1430 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
1431 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
1436 static u32 clear_enable_read(struct hisi_qm *qm)
1438 return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
1441 /* rd_clr_ctrl: 1 enables read-clear, 0 disables it */
1442 static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl)
1444 if (rd_clr_ctrl > 1)
1447 writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);
1452 static u32 current_qm_read(struct hisi_qm *qm)
1454 return readl(qm->io_base + QM_DFX_MB_CNT_VF);
1457 static int current_qm_write(struct hisi_qm *qm, u32 val)
1461 if (val > qm->vfs_num)
1464 /* Calculate curr_qm_qp_num from the PF or VF Dev ID and store it */
1466 qm->debug.curr_qm_qp_num = qm->qp_num;
1468 qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val);
1470 writel(val, qm->io_base + QM_DFX_MB_CNT_VF);
1471 writel(val, qm->io_base + QM_DFX_DB_CNT_VF);
1474 (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK);
1475 writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
1478 (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK);
1479 writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
1484 static ssize_t qm_debug_read(struct file *filp, char __user *buf,
1485 size_t count, loff_t *pos)
1487 struct debugfs_file *file = filp->private_data;
1488 enum qm_debug_file index = file->index;
1489 struct hisi_qm *qm = file_to_qm(file);
1490 char tbuf[QM_DBG_TMP_BUF_LEN];
1494 ret = hisi_qm_get_dfx_access(qm);
1498 mutex_lock(&file->lock);
1501 val = current_qm_read(qm);
1504 val = current_q_read(qm);
1507 val = clear_enable_read(qm);
1512 mutex_unlock(&file->lock);
1514 hisi_qm_put_dfx_access(qm);
1515 ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val);
1516 return simple_read_from_buffer(buf, count, pos, tbuf, ret);
1519 mutex_unlock(&file->lock);
1520 hisi_qm_put_dfx_access(qm);
1524 static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
1525 size_t count, loff_t *pos)
1527 struct debugfs_file *file = filp->private_data;
1528 enum qm_debug_file index = file->index;
1529 struct hisi_qm *qm = file_to_qm(file);
1531 char tbuf[QM_DBG_TMP_BUF_LEN];
1537 if (count >= QM_DBG_TMP_BUF_LEN)
1540 len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
1546 if (kstrtoul(tbuf, 0, &val))
1549 ret = hisi_qm_get_dfx_access(qm);
1553 mutex_lock(&file->lock);
1556 ret = current_qm_write(qm, val);
1559 ret = current_q_write(qm, val);
1562 ret = clear_enable_write(qm, val);
1567 mutex_unlock(&file->lock);
1569 hisi_qm_put_dfx_access(qm);
1577 static const struct file_operations qm_debug_fops = {
1578 .owner = THIS_MODULE,
1579 .open = simple_open,
1580 .read = qm_debug_read,
1581 .write = qm_debug_write,
1584 #define CNT_CYC_REGS_NUM 10
1585 static const struct debugfs_reg32 qm_dfx_regs[] = {
1586 /* XXX_CNT are read-clear registers */
1587 {"QM_ECC_1BIT_CNT ", 0x104000ull},
1588 {"QM_ECC_MBIT_CNT ", 0x104008ull},
1589 {"QM_DFX_MB_CNT ", 0x104018ull},
1590 {"QM_DFX_DB_CNT ", 0x104028ull},
1591 {"QM_DFX_SQE_CNT ", 0x104038ull},
1592 {"QM_DFX_CQE_CNT ", 0x104048ull},
1593 {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull},
1594 {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull},
1595 {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull},
1596 {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull},
1597 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
1598 {"QM_ECC_1BIT_INF ", 0x104004ull},
1599 {"QM_ECC_MBIT_INF ", 0x10400cull},
1600 {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull},
1601 {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull},
1602 {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull},
1603 {"QM_DFX_FF_ST0 ", 0x1040c8ull},
1604 {"QM_DFX_FF_ST1 ", 0x1040ccull},
1605 {"QM_DFX_FF_ST2 ", 0x1040d0ull},
1606 {"QM_DFX_FF_ST3 ", 0x1040d4ull},
1607 {"QM_DFX_FF_ST4 ", 0x1040d8ull},
1608 {"QM_DFX_FF_ST5 ", 0x1040dcull},
1609 {"QM_DFX_FF_ST6 ", 0x1040e0ull},
1610 {"QM_IN_IDLE_ST ", 0x1040e4ull},
1613 static const struct debugfs_reg32 qm_vf_dfx_regs[] = {
1614 {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull},
1618 * hisi_qm_regs_dump() - Dump registers' values.
1619 * @s: debugfs file handle.
1620 * @regset: accelerator registers information.
1622 * Dump accelerator registers.
1624 void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset)
1626 struct pci_dev *pdev = to_pci_dev(regset->dev);
1627 struct hisi_qm *qm = pci_get_drvdata(pdev);
1628 const struct debugfs_reg32 *regs = regset->regs;
1629 int regs_len = regset->nregs;
1633 ret = hisi_qm_get_dfx_access(qm);
1637 for (i = 0; i < regs_len; i++) {
1638 val = readl(regset->base + regs[i].offset);
1639 seq_printf(s, "%s= 0x%08x\n", regs[i].name, val);
1642 hisi_qm_put_dfx_access(qm);
1644 EXPORT_SYMBOL_GPL(hisi_qm_regs_dump);
1646 static int qm_regs_show(struct seq_file *s, void *unused)
1648 struct hisi_qm *qm = s->private;
1649 struct debugfs_regset32 regset;
1651 if (qm->fun_type == QM_HW_PF) {
1652 regset.regs = qm_dfx_regs;
1653 regset.nregs = ARRAY_SIZE(qm_dfx_regs);
1655 regset.regs = qm_vf_dfx_regs;
1656 regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs);
1659 regset.base = qm->io_base;
1660 regset.dev = &qm->pdev->dev;
1662 hisi_qm_regs_dump(s, &regset);
1667 DEFINE_SHOW_ATTRIBUTE(qm_regs);
1669 static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm,
1670 const struct dfx_diff_registers *cregs, int reg_len)
1672 struct dfx_diff_registers *diff_regs;
1676 diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL);
1678 return ERR_PTR(-ENOMEM);
1680 for (i = 0; i < reg_len; i++) {
1681 if (!cregs[i].reg_len)
1684 diff_regs[i].reg_offset = cregs[i].reg_offset;
1685 diff_regs[i].reg_len = cregs[i].reg_len;
1686 diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len,
1688 if (!diff_regs[i].regs)
1691 for (j = 0; j < diff_regs[i].reg_len; j++) {
1692 base_offset = diff_regs[i].reg_offset +
1693 j * QM_DFX_REGS_LEN;
1694 diff_regs[i].regs[j] = readl(qm->io_base + base_offset);
1703 kfree(diff_regs[i].regs);
1706 return ERR_PTR(-ENOMEM);
1709 static void dfx_regs_uninit(struct hisi_qm *qm,
1710 struct dfx_diff_registers *dregs, int reg_len)
1714 /* Set the pointers to NULL to prevent a double free */
1715 for (i = 0; i < reg_len; i++) {
1716 kfree(dregs[i].regs);
1717 dregs[i].regs = NULL;
1724 * hisi_qm_diff_regs_init() - Allocate memory for registers.
1725 * @qm: device qm handle.
1726 * @dregs: diff registers handle.
1727 * @reg_len: diff registers region length.
1729 int hisi_qm_diff_regs_init(struct hisi_qm *qm,
1730 struct dfx_diff_registers *dregs, int reg_len)
1732 if (!qm || !dregs || reg_len <= 0)
1735 if (qm->fun_type != QM_HW_PF)
1738 qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs,
1739 ARRAY_SIZE(qm_diff_regs));
1740 if (IS_ERR(qm->debug.qm_diff_regs))
1741 return PTR_ERR(qm->debug.qm_diff_regs);
1743 qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len);
1744 if (IS_ERR(qm->debug.acc_diff_regs)) {
1745 dfx_regs_uninit(qm, qm->debug.qm_diff_regs,
1746 ARRAY_SIZE(qm_diff_regs));
1747 return PTR_ERR(qm->debug.acc_diff_regs);
1752 EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_init);
1755 * hisi_qm_diff_regs_uninit() - Free memory for registers.
1756 * @qm: device qm handle.
1757 * @reg_len: diff registers region length.
1759 void hisi_qm_diff_regs_uninit(struct hisi_qm *qm, int reg_len)
1761 if (!qm || reg_len <= 0 || qm->fun_type != QM_HW_PF)
1764 dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len);
1765 dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs));
1767 EXPORT_SYMBOL_GPL(hisi_qm_diff_regs_uninit);
1770 * hisi_qm_acc_diff_regs_dump() - Dump registers' values.
1771 * @qm: device qm handle.
1772 * @s: Debugfs file handle.
1773 * @dregs: diff registers handle.
1774 * @regs_len: diff registers region length.
1776 void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s,
1777 struct dfx_diff_registers *dregs, int regs_len)
1779 u32 j, val, base_offset;
1782 if (!qm || !s || !dregs || regs_len <= 0)
1785 ret = hisi_qm_get_dfx_access(qm);
1789 down_read(&qm->qps_lock);
1790 for (i = 0; i < regs_len; i++) {
1791 if (!dregs[i].reg_len)
1794 for (j = 0; j < dregs[i].reg_len; j++) {
1795 base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN;
1796 val = readl(qm->io_base + base_offset);
1797 if (val != dregs[i].regs[j])
1798 seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n",
1799 base_offset, dregs[i].regs[j], val);
1802 up_read(&qm->qps_lock);
1804 hisi_qm_put_dfx_access(qm);
1806 EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump);
1808 static int qm_diff_regs_show(struct seq_file *s, void *unused)
1810 struct hisi_qm *qm = s->private;
1812 hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs,
1813 ARRAY_SIZE(qm_diff_regs));
1817 DEFINE_SHOW_ATTRIBUTE(qm_diff_regs);
1819 static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
1820 size_t count, loff_t *pos)
1822 char buf[QM_DBG_READ_LEN];
1825 len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
1826 "Please echo help to cmd to get help information");
1828 return simple_read_from_buffer(buffer, count, pos, buf, len);
1831 static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
1832 dma_addr_t *dma_addr)
1834 struct device *dev = &qm->pdev->dev;
1837 ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
1839 return ERR_PTR(-ENOMEM);
1841 *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
1842 if (dma_mapping_error(dev, *dma_addr)) {
1843 dev_err(dev, "DMA mapping error!\n");
1845 return ERR_PTR(-ENOMEM);
1851 static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
1852 const void *ctx_addr, dma_addr_t *dma_addr)
1854 struct device *dev = &qm->pdev->dev;
1856 dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
1860 static int dump_show(struct hisi_qm *qm, void *info,
1861 unsigned int info_size, char *info_name)
1863 struct device *dev = &qm->pdev->dev;
1864 u8 *info_buf, *info_curr = info;
1866 #define BYTE_PER_DW 4
1868 info_buf = kzalloc(info_size, GFP_KERNEL);
1872 for (i = 0; i < info_size; i++, info_curr++) {
1873 if (i % BYTE_PER_DW == 0)
1874 info_buf[i + 3UL] = *info_curr;
1875 else if (i % BYTE_PER_DW == 1)
1876 info_buf[i + 1UL] = *info_curr;
1877 else if (i % BYTE_PER_DW == 2)
1878 info_buf[i - 1] = *info_curr;
1879 else if (i % BYTE_PER_DW == 3)
1880 info_buf[i - 3] = *info_curr;
1883 dev_info(dev, "%s DUMP\n", info_name);
1884 for (i = 0; i < info_size; i += BYTE_PER_DW) {
1885 pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
1886 info_buf[i], info_buf[i + 1UL],
1887 info_buf[i + 2UL], info_buf[i + 3UL]);
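/*
 * Editor's note: the copy loop above reverses each 4-byte group (bytes
 * 0,1,2,3 -> 3,2,1,0), so each "DW%u" line prints the 32-bit word most
 * significant byte first; e.g. little-endian bytes 78 56 34 12 print as
 * "1234 5678".
 */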
1895 static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1897 return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
1900 static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1902 return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
1905 static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
1907 struct device *dev = &qm->pdev->dev;
1908 struct qm_sqc *sqc, *sqc_curr;
1916 ret = kstrtou32(s, 0, &qp_id);
1917 if (ret || qp_id >= qm->qp_num) {
1918 dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
1922 sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
1924 return PTR_ERR(sqc);
1926 ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
1928 down_read(&qm->qps_lock);
1930 sqc_curr = qm->sqc + qp_id;
1932 ret = dump_show(qm, sqc_curr, sizeof(*sqc),
1935 dev_info(dev, "Show soft sqc failed!\n");
1937 up_read(&qm->qps_lock);
1942 ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
1944 dev_info(dev, "Show hw sqc failed!\n");
1947 qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
1951 static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
1953 struct device *dev = &qm->pdev->dev;
1954 struct qm_cqc *cqc, *cqc_curr;
1962 ret = kstrtou32(s, 0, &qp_id);
1963 if (ret || qp_id >= qm->qp_num) {
1964 dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1);
1968 cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
1970 return PTR_ERR(cqc);
1972 ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
1974 down_read(&qm->qps_lock);
1976 cqc_curr = qm->cqc + qp_id;
1978 ret = dump_show(qm, cqc_curr, sizeof(*cqc),
1981 dev_info(dev, "Show soft cqc failed!\n");
1983 up_read(&qm->qps_lock);
1988 ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
1990 dev_info(dev, "Show hw cqc failed!\n");
1993 qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
1997 static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
1998 int cmd, char *name)
2000 struct device *dev = &qm->pdev->dev;
2001 dma_addr_t xeqc_dma;
2005 if (strsep(&s, " ")) {
2006 dev_err(dev, "Please do not input extra characters!\n");
2010 xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
2012 return PTR_ERR(xeqc);
2014 ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1);
2018 ret = dump_show(qm, xeqc, size, name);
2020 dev_info(dev, "Show hw %s failed!\n", name);
2023 qm_ctx_free(qm, size, xeqc, &xeqc_dma);
2027 static int q_dump_param_parse(struct hisi_qm *qm, char *s,
2028 u32 *e_id, u32 *q_id)
2030 struct device *dev = &qm->pdev->dev;
2031 unsigned int qp_num = qm->qp_num;
2035 presult = strsep(&s, " ");
2037 dev_err(dev, "Please input qp number!\n");
2041 ret = kstrtou32(presult, 0, q_id);
2042 if (ret || *q_id >= qp_num) {
2043 dev_err(dev, "Please input qp num (0-%u)", qp_num - 1);
2047 presult = strsep(&s, " ");
2049 dev_err(dev, "Please input sqe number!\n");
2053 ret = kstrtou32(presult, 0, e_id);
2054 if (ret || *e_id >= QM_Q_DEPTH) {
2055 dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
2059 if (strsep(&s, " ")) {
2060 dev_err(dev, "Please do not input extra characters!\n");
2067 static int qm_sq_dump(struct hisi_qm *qm, char *s)
2069 struct device *dev = &qm->pdev->dev;
2070 void *sqe, *sqe_curr;
2075 ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
2079 sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
2083 qp = &qm->qp_array[qp_id];
2084 memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
2085 sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
2086 memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
2087 qm->debug.sqe_mask_len);
2089 ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
2091 dev_info(dev, "Show sqe failed!\n");
2098 static int qm_cq_dump(struct hisi_qm *qm, char *s)
2100 struct device *dev = &qm->pdev->dev;
2101 struct qm_cqe *cqe_curr;
2106 ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
2110 qp = &qm->qp_array[qp_id];
2111 cqe_curr = qp->cqe + cqe_id;
2112 ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
2114 dev_info(dev, "Show cqe failed!\n");
2119 static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
2120 size_t size, char *name)
2122 struct device *dev = &qm->pdev->dev;
2130 ret = kstrtou32(s, 0, &xeqe_id);
2134 if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
2135 dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
2137 } else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
2138 dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
2142 down_read(&qm->qps_lock);
2144 if (qm->eqe && !strcmp(name, "EQE")) {
2145 xeqe = qm->eqe + xeqe_id;
2146 } else if (qm->aeqe && !strcmp(name, "AEQE")) {
2147 xeqe = qm->aeqe + xeqe_id;
2153 ret = dump_show(qm, xeqe, size, name);
2155 dev_info(dev, "Show %s failed!\n", name);
2158 up_read(&qm->qps_lock);
2162 static int qm_dbg_help(struct hisi_qm *qm, char *s)
2164 struct device *dev = &qm->pdev->dev;
2166 if (strsep(&s, " ")) {
2167 dev_err(dev, "Please do not input extra characters!\n");
2171 dev_info(dev, "available commands:\n");
2172 dev_info(dev, "sqc <num>\n");
2173 dev_info(dev, "cqc <num>\n");
2174 dev_info(dev, "eqc\n");
2175 dev_info(dev, "aeqc\n");
2176 dev_info(dev, "sq <num> <e>\n");
2177 dev_info(dev, "cq <num> <e>\n");
2178 dev_info(dev, "eq <e>\n");
2179 dev_info(dev, "aeq <e>\n");
2184 static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
2186 struct device *dev = &qm->pdev->dev;
2187 char *presult, *s, *s_tmp;
2190 s = kstrdup(cmd_buf, GFP_KERNEL);
2195 presult = strsep(&s, " ");
2198 goto err_buffer_free;
2201 if (!strcmp(presult, "sqc"))
2202 ret = qm_sqc_dump(qm, s);
2203 else if (!strcmp(presult, "cqc"))
2204 ret = qm_cqc_dump(qm, s);
2205 else if (!strcmp(presult, "eqc"))
2206 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
2207 QM_MB_CMD_EQC, "EQC");
2208 else if (!strcmp(presult, "aeqc"))
2209 ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
2210 QM_MB_CMD_AEQC, "AEQC");
2211 else if (!strcmp(presult, "sq"))
2212 ret = qm_sq_dump(qm, s);
2213 else if (!strcmp(presult, "cq"))
2214 ret = qm_cq_dump(qm, s);
2215 else if (!strcmp(presult, "eq"))
2216 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
2217 else if (!strcmp(presult, "aeq"))
2218 ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
2219 else if (!strcmp(presult, "help"))
2220 ret = qm_dbg_help(qm, s);
2225 dev_info(dev, "Please echo help\n");
2233 static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
2234 size_t count, loff_t *pos)
2236 struct hisi_qm *qm = filp->private_data;
2237 char *cmd_buf, *cmd_buf_tmp;
2243 ret = hisi_qm_get_dfx_access(qm);
2247 /* Check if the instance is being reset. */
2248 if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
2251 if (count > QM_DBG_WRITE_LEN) {
2253 goto put_dfx_access;
2256 cmd_buf = memdup_user_nul(buffer, count);
2257 if (IS_ERR(cmd_buf)) {
2258 ret = PTR_ERR(cmd_buf);
2259 goto put_dfx_access;
2262 cmd_buf_tmp = strchr(cmd_buf, '\n');
2264 *cmd_buf_tmp = '\0';
2265 count = cmd_buf_tmp - cmd_buf + 1;
2268 ret = qm_cmd_write_dump(qm, cmd_buf);
2271 goto put_dfx_access;
2279 hisi_qm_put_dfx_access(qm);
2283 static const struct file_operations qm_cmd_fops = {
2284 .owner = THIS_MODULE,
2285 .open = simple_open,
2286 .read = qm_cmd_read,
2287 .write = qm_cmd_write,
2290 static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir,
2291 enum qm_debug_file index)
2293 struct debugfs_file *file = qm->debug.files + index;
2295 debugfs_create_file(qm_debug_file_name[index], 0600, dir, file,
2298 file->index = index;
2299 mutex_init(&file->lock);
2300 file->debug = &qm->debug;
2303 static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
2305 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
2308 static void qm_hw_error_cfg(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
2310 qm->error_mask = ce | nfe | fe;
2311 /* clear QM hw residual error source */
2312 writel(QM_ABNORMAL_INT_SOURCE_CLR,
2313 qm->io_base + QM_ABNORMAL_INT_SOURCE);
2315 /* configure error type */
2316 writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
2317 writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
2318 writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
2319 writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
2322 static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
2324 u32 irq_enable = ce | nfe | fe;
2325 u32 irq_unmask = ~irq_enable;
2327 qm_hw_error_cfg(qm, ce, nfe, fe);
2329 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
2330 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
2333 static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
2335 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
2338 static void qm_hw_error_init_v3(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
2340 u32 irq_enable = ce | nfe | fe;
2341 u32 irq_unmask = ~irq_enable;
2343 qm_hw_error_cfg(qm, ce, nfe, fe);
2345 /* enable shutdown of the master OOO when a hardware error happens */
2346 writel(nfe & (~QM_DB_RANDOM_INVALID), qm->io_base + QM_OOO_SHUTDOWN_SEL);
2348 irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
2349 writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
2352 static void qm_hw_error_uninit_v3(struct hisi_qm *qm)
2354 writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
2356 /* disable shutdown of the master OOO when a hardware error happens */
2357 writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL);
2360 static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
2362 const struct hisi_qm_hw_error *err;
2363 struct device *dev = &qm->pdev->dev;
2364 u32 reg_val, type, vf_num;
2367 for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
2368 err = &qm_hw_error[i];
2369 if (!(err->int_msk & error_status))
2372 dev_err(dev, "%s [error status=0x%x] found\n",
2373 err->msg, err->int_msk);
2375 if (err->int_msk & QM_DB_TIMEOUT) {
2376 reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
2377 type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
2378 QM_DB_TIMEOUT_TYPE_SHIFT;
2379 vf_num = reg_val & QM_DB_TIMEOUT_VF;
2380 dev_err(dev, "qm %s doorbell timeout in function %u\n",
2381 qm_db_timeout[type], vf_num);
2382 } else if (err->int_msk & QM_OF_FIFO_OF) {
2383 reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
2384 type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
2385 QM_FIFO_OVERFLOW_TYPE_SHIFT;
2386 vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
2388 if (type < ARRAY_SIZE(qm_fifo_overflow))
2389 dev_err(dev, "qm %s fifo overflow in function %u\n",
2390 qm_fifo_overflow[type], vf_num);
2392 dev_err(dev, "unknown error type\n");
2397 static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
2399 u32 error_status, tmp, val;
2402 tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
2403 error_status = qm->error_mask & tmp;
2406 if (error_status & QM_ECC_MBIT)
2407 qm->err_status.is_qm_ecc_mbit = true;
2409 qm_log_hw_error(qm, error_status);
2410 val = error_status | QM_DB_RANDOM_INVALID | QM_BASE_CE;
2411 /* CE errors do not require a reset */
2412 if (val == (QM_DB_RANDOM_INVALID | QM_BASE_CE)) {
2413 writel(error_status, qm->io_base +
2414 QM_ABNORMAL_INT_SOURCE);
2415 writel(qm->err_info.nfe,
2416 qm->io_base + QM_RAS_NFE_ENABLE);
2417 return ACC_ERR_RECOVERED;
2420 return ACC_ERR_NEED_RESET;
2423 return ACC_ERR_RECOVERED;
2426 static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num)
2428 struct qm_mailbox mailbox;
2431 qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0);
2432 mutex_lock(&qm->mailbox_lock);
2433 ret = qm_mb_nolock(qm, &mailbox);
2437 *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
2438 ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
2441 mutex_unlock(&qm->mailbox_lock);
2445 static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask)
2449 if (qm->fun_type == QM_HW_PF)
2450 writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P);
2452 val = readl(qm->io_base + QM_IFC_INT_SOURCE_V);
2453 val |= QM_IFC_INT_SOURCE_MASK;
2454 writel(val, qm->io_base + QM_IFC_INT_SOURCE_V);
2457 static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id)
2459 struct device *dev = &qm->pdev->dev;
2464 ret = qm_get_mb_cmd(qm, &msg, vf_id);
2466 dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id);
2470 cmd = msg & QM_MB_CMD_DATA_MASK;
2472 case QM_VF_PREPARE_FAIL:
2473 dev_err(dev, "failed to stop VF(%u)!\n", vf_id);
2475 case QM_VF_START_FAIL:
2476 dev_err(dev, "failed to start VF(%u)!\n", vf_id);
2478 case QM_VF_PREPARE_DONE:
2479 case QM_VF_START_DONE:
2482 dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id);
2487 static int qm_wait_vf_prepare_finish(struct hisi_qm *qm)
2489 struct device *dev = &qm->pdev->dev;
2490 u32 vfs_num = qm->vfs_num;
2496 if (!qm->vfs_num || qm->ver < QM_HW_V3)
2500 val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
2501 /* Break once all VFs have sent their commands to the PF */
2502 if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1))
2505 if (++cnt > QM_MAX_PF_WAIT_COUNT) {
2510 msleep(QM_WAIT_DST_ACK);
2513 /* PF checks each VF's message */
2514 for (i = 1; i <= vfs_num; i++) {
2516 qm_handle_vf_msg(qm, i);
2518 dev_err(dev, "VF(%u) not ping PF!\n", i);
2521 /* PF clears the interrupt to ack the VFs */
2522 qm_clear_cmd_interrupt(qm, val);
2527 static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num)
2531 val = readl(qm->io_base + QM_IFC_INT_CFG);
2532 val &= ~QM_IFC_SEND_ALL_VFS;
2534 writel(val, qm->io_base + QM_IFC_INT_CFG);
2536 val = readl(qm->io_base + QM_IFC_INT_SET_P);
2537 val |= QM_IFC_INT_SET_MASK;
2538 writel(val, qm->io_base + QM_IFC_INT_SET_P);
2541 static void qm_trigger_pf_interrupt(struct hisi_qm *qm)
2545 val = readl(qm->io_base + QM_IFC_INT_SET_V);
2546 val |= QM_IFC_INT_SET_MASK;
2547 writel(val, qm->io_base + QM_IFC_INT_SET_V);
2550 static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num)
2552 struct device *dev = &qm->pdev->dev;
2553 struct qm_mailbox mailbox;
2558 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0);
2559 mutex_lock(&qm->mailbox_lock);
2560 ret = qm_mb_nolock(qm, &mailbox);
2562 dev_err(dev, "failed to send command to vf(%u)!\n", fun_num);
2566 qm_trigger_vf_interrupt(qm, fun_num);
2568 msleep(QM_WAIT_DST_ACK);
2569 val = readq(qm->io_base + QM_IFC_READY_STATUS);
2570 /* If the VF has responded, the PF notified it successfully. */
2571 if (!(val & BIT(fun_num)))
2574 if (++cnt > QM_MAX_PF_WAIT_COUNT) {
2575 dev_err(dev, "failed to get response from VF(%u)!\n", fun_num);
2582 mutex_unlock(&qm->mailbox_lock);
2586 static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd)
2588 struct device *dev = &qm->pdev->dev;
2589 u32 vfs_num = qm->vfs_num;
2590 struct qm_mailbox mailbox;
2596 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0);
2597 mutex_lock(&qm->mailbox_lock);
2598 /* PF sends command to all VFs by mailbox */
2599 ret = qm_mb_nolock(qm, &mailbox);
2601 dev_err(dev, "failed to send command to VFs!\n");
2602 mutex_unlock(&qm->mailbox_lock);
2606 qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS);
2608 msleep(QM_WAIT_DST_ACK);
2609 val = readq(qm->io_base + QM_IFC_READY_STATUS);
2610 /* If all VFs have acked, the PF notified them successfully. */
2611 if (!(val & GENMASK(vfs_num, 1))) {
2612 mutex_unlock(&qm->mailbox_lock);
2616 if (++cnt > QM_MAX_PF_WAIT_COUNT)
2620 mutex_unlock(&qm->mailbox_lock);
2622 /* Check which VF timed out responding. */
2623 for (i = 1; i <= vfs_num; i++) {
2625 dev_err(dev, "failed to get response from VF(%u)!\n", i);
2631 static int qm_ping_pf(struct hisi_qm *qm, u64 cmd)
2633 struct qm_mailbox mailbox;
2638 qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0);
2639 mutex_lock(&qm->mailbox_lock);
2640 ret = qm_mb_nolock(qm, &mailbox);
2642 dev_err(&qm->pdev->dev, "failed to send command to PF!\n");
2646 qm_trigger_pf_interrupt(qm);
2647 /* Wait for the PF's response */
2649 msleep(QM_WAIT_DST_ACK);
2650 val = readl(qm->io_base + QM_IFC_INT_SET_V);
2651 if (!(val & QM_IFC_INT_STATUS_MASK))
2654 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
2661 mutex_unlock(&qm->mailbox_lock);
2665 static int qm_stop_qp(struct hisi_qp *qp)
2667 return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0);
2670 static int qm_set_msi(struct hisi_qm *qm, bool set)
2672 struct pci_dev *pdev = qm->pdev;
2675 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
2678 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
2679 ACC_PEH_MSI_DISABLE);
2680 if (qm->err_status.is_qm_ecc_mbit ||
2681 qm->err_status.is_dev_ecc_mbit)
2685 if (readl(qm->io_base + QM_PEH_DFX_INFO0))
2692 static void qm_wait_msi_finish(struct hisi_qm *qm)
2694 struct pci_dev *pdev = qm->pdev;
2701 pci_read_config_dword(pdev, pdev->msi_cap +
2702 PCI_MSI_PENDING_64, &cmd);
2706 if (++cnt > MAX_WAIT_COUNTS) {
2707 pci_warn(pdev, "failed to empty MSI PENDING!\n");
2714 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0,
2715 val, !(val & QM_PEH_DFX_MASK),
2716 POLL_PERIOD, POLL_TIMEOUT);
2718 pci_warn(pdev, "failed to empty PEH MSI!\n");
2720 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1,
2721 val, !(val & QM_PEH_MSI_FINISH_MASK),
2722 POLL_PERIOD, POLL_TIMEOUT);
2724 pci_warn(pdev, "failed to finish MSI operation!\n");
2727 static int qm_set_msi_v3(struct hisi_qm *qm, bool set)
2729 struct pci_dev *pdev = qm->pdev;
2730 int ret = -ETIMEDOUT;
2733 pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
2735 cmd |= QM_MSI_CAP_ENABLE;
2737 cmd &= ~QM_MSI_CAP_ENABLE;
2739 pci_write_config_dword(pdev, pdev->msi_cap, cmd);
2741 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
2742 pci_read_config_dword(pdev, pdev->msi_cap, &cmd);
2743 if (cmd & QM_MSI_CAP_ENABLE)
2749 udelay(WAIT_PERIOD_US_MIN);
2750 qm_wait_msi_finish(qm);
2757 static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
2759 .get_irq_num = qm_get_irq_num_v1,
2760 .hw_error_init = qm_hw_error_init_v1,
2761 .set_msi = qm_set_msi,
2764 static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
2765 .get_vft = qm_get_vft_v2,
2767 .get_irq_num = qm_get_irq_num_v2,
2768 .hw_error_init = qm_hw_error_init_v2,
2769 .hw_error_uninit = qm_hw_error_uninit_v2,
2770 .hw_error_handle = qm_hw_error_handle_v2,
2771 .set_msi = qm_set_msi,
2774 static const struct hisi_qm_hw_ops qm_hw_ops_v3 = {
2775 .get_vft = qm_get_vft_v2,
2777 .get_irq_num = qm_get_irq_num_v3,
2778 .hw_error_init = qm_hw_error_init_v3,
2779 .hw_error_uninit = qm_hw_error_uninit_v3,
2780 .hw_error_handle = qm_hw_error_handle_v2,
2781 .stop_qp = qm_stop_qp,
2782 .set_msi = qm_set_msi_v3,
2783 .ping_all_vfs = qm_ping_all_vfs,
2784 .ping_pf = qm_ping_pf,
2787 static void *qm_get_avail_sqe(struct hisi_qp *qp)
2789 struct hisi_qp_status *qp_status = &qp->qp_status;
2790 u16 sq_tail = qp_status->sq_tail;
2792 if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
2795 return qp->sqe + sq_tail * qp->qm->sqe_size;
2798 static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
2802 /* Use last 64 bits of DUS to reset status. */
2803 addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
2807 static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
2809 struct device *dev = &qm->pdev->dev;
2813 if (!qm_qp_avail_state(qm, NULL, QP_INIT))
2814 return ERR_PTR(-EPERM);
2816 if (qm->qp_in_used == qm->qp_num) {
2817 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
2819 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2820 return ERR_PTR(-EBUSY);
2823 qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
2825 dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
2827 atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
2828 return ERR_PTR(-EBUSY);
2831 qp = &qm->qp_array[qp_id];
2832 hisi_qm_unset_hw_reset(qp);
2833 memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
2835 qp->event_cb = NULL;
2838 qp->alg_type = alg_type;
2839 qp->is_in_kernel = true;
2841 atomic_set(&qp->qp_status.flags, QP_INIT);
2847 * hisi_qm_create_qp() - Create a queue pair from qm.
2848 * @qm: The qm we create a qp from.
2849 * @alg_type: Accelerator specific algorithm type in sqc.
2851 * Return the created qp, -EBUSY if all qps in the qm are allocated, or -ENOMEM if allocating memory fails.
2854 static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
2859 ret = qm_pm_get_sync(qm);
2861 return ERR_PTR(ret);
2863 down_write(&qm->qps_lock);
2864 qp = qm_create_qp_nolock(qm, alg_type);
2865 up_write(&qm->qps_lock);
2874 * hisi_qm_release_qp() - Release a qp back to its qm.
2875 * @qp: The qp we want to release.
2877 * This function releases the resource of a qp.
2879 static void hisi_qm_release_qp(struct hisi_qp *qp)
2881 struct hisi_qm *qm = qp->qm;
2883 down_write(&qm->qps_lock);
2885 if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
2886 up_write(&qm->qps_lock);
2891 idr_remove(&qm->qp_idr, qp->qp_id);
2893 up_write(&qm->qps_lock);
2898 static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2900 struct hisi_qm *qm = qp->qm;
2901 struct device *dev = &qm->pdev->dev;
2902 enum qm_hw_ver ver = qm->ver;
2907 sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
2911 INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
2912 if (ver == QM_HW_V1) {
2913 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
2914 sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
2916 sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
2917 sqc->w8 = 0; /* rand_qc */
2919 sqc->cq_num = cpu_to_le16(qp_id);
2920 sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
2922 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2923 sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE <<
2924 QM_QC_PASID_ENABLE_SHIFT);
2926 sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
2928 if (dma_mapping_error(dev, sqc_dma)) {
2933 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
2934 dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
2940 static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2942 struct hisi_qm *qm = qp->qm;
2943 struct device *dev = &qm->pdev->dev;
2944 enum qm_hw_ver ver = qm->ver;
2949 cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
2953 INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
2954 if (ver == QM_HW_V1) {
2955 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0,
2957 cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
2959 cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE));
2960 cqc->w8 = 0; /* rand_qc */
2962 cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
2964 if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel)
2965 cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE);
2967 cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
2969 if (dma_mapping_error(dev, cqc_dma)) {
2974 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
2975 dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
2981 static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
2985 qm_init_qp_status(qp);
2987 ret = qm_sq_ctx_cfg(qp, qp_id, pasid);
2991 return qm_cq_ctx_cfg(qp, qp_id, pasid);
2994 static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
2996 struct hisi_qm *qm = qp->qm;
2997 struct device *dev = &qm->pdev->dev;
2998 int qp_id = qp->qp_id;
3002 if (!qm_qp_avail_state(qm, qp, QP_START))
3005 ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
3009 atomic_set(&qp->qp_status.flags, QP_START);
3010 dev_dbg(dev, "queue %d started\n", qp_id);
3016 * hisi_qm_start_qp() - Start a qp into running.
3017 * @qp: The qp we want to start to run.
3018 * @arg: Accelerator specific argument.
3020 * After this function, the qp can receive requests from the user. Return 0 if
3021 * successful, or -EBUSY on failure.
3023 int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
3025 struct hisi_qm *qm = qp->qm;
3028 down_write(&qm->qps_lock);
3029 ret = qm_start_qp_nolock(qp, arg);
3030 up_write(&qm->qps_lock);
3034 EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
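/*
* Illustrative sketch (not taken from this file): within this driver a
* kernel user pairs hisi_qm_start_qp() with the static helpers above,
* passing pasid 0 for a non-SVA kernel queue; the alg_type of 0 is an
* arbitrary accelerator-specific value.
*
* struct hisi_qp *qp;
* int ret;
*
* qp = hisi_qm_create_qp(qm, 0);
* if (IS_ERR(qp))
* return PTR_ERR(qp);
*
* ret = hisi_qm_start_qp(qp, 0);
* if (ret)
* hisi_qm_release_qp(qp);
*/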
3037 * qp_stop_fail_cb() - call request cb.
3038 * @qp: The qp that failed to stop.
3040 * The request callback should be called for each pending task, whether the task completed or not.
3042 static void qp_stop_fail_cb(struct hisi_qp *qp)
3044 int qp_used = atomic_read(&qp->qp_status.used);
3045 u16 cur_tail = qp->qp_status.sq_tail;
3046 u16 cur_head = (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH;
3047 struct hisi_qm *qm = qp->qm;
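/*
* The sq is a ring of QM_Q_DEPTH entries. cur_head, computed above as
* (cur_tail + QM_Q_DEPTH - qp_used) % QM_Q_DEPTH, is the oldest pending
* sqe. For example, with a depth of 8, tail 3 and 5 entries pending,
* the head is (3 + 8 - 5) % 8 = 6, wrapping across the ring end.
*/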
3051 for (i = 0; i < qp_used; i++) {
3052 pos = (i + cur_head) % QM_Q_DEPTH;
3053 qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos));
3054 atomic_dec(&qp->qp_status.used);
3059 * qm_drain_qp() - Drain a qp.
3060 * @qp: The qp we want to drain.
3062 * Determine whether the queue is cleared by comparing the tail pointers of the sq and cq.
3065 static int qm_drain_qp(struct hisi_qp *qp)
3067 size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
3068 struct hisi_qm *qm = qp->qm;
3069 struct device *dev = &qm->pdev->dev;
3072 dma_addr_t dma_addr;
3076 /* No need to check the queue if the master OOO is blocked. */
3077 if (qm_check_dev_error(qm))
3080 /* Kunpeng930 supports draining the qp by the device */
3081 if (qm->ops->stop_qp) {
3082 ret = qm->ops->stop_qp(qp);
3084 dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id);
3088 addr = qm_ctx_alloc(qm, size, &dma_addr);
3090 dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
3095 ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
3097 dev_err_ratelimited(dev, "Failed to dump sqc!\n");
3102 ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
3105 dev_err_ratelimited(dev, "Failed to dump cqc!\n");
3108 cqc = addr + sizeof(struct qm_sqc);
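/*
* The queue is treated as empty once the dumped sqc and cqc agree on
* both the tail value and the tail index bit (bit 6 of w11, compared
* via QM_SQ_TAIL_IDX/QM_CQ_TAIL_IDX), i.e. they have wrapped the same
* number of times modulo two.
*/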
3110 if ((sqc->tail == cqc->tail) &&
3111 (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
3114 if (i == MAX_WAIT_COUNTS) {
3115 dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
3120 usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
3123 qm_ctx_free(qm, size, addr, &dma_addr);
3128 static int qm_stop_qp_nolock(struct hisi_qp *qp)
3130 struct device *dev = &qp->qm->pdev->dev;
3134 * It is allowed to stop and release the qp during reset. If the qp is
3135 * stopped during reset but still needs to be released afterwards, the
3136 * is_resetting flag should be cleared so that this qp will not
3137 * be restarted after the reset.
3139 if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
3140 qp->is_resetting = false;
3144 if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
3147 atomic_set(&qp->qp_status.flags, QP_STOP);
3149 ret = qm_drain_qp(qp);
3151 dev_err(dev, "Failed to drain out data for stopping!\n");
3154 flush_workqueue(qp->qm->wq);
3155 if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used)))
3156 qp_stop_fail_cb(qp);
3158 dev_dbg(dev, "stop queue %u!", qp->qp_id);
3164 * hisi_qm_stop_qp() - Stop a qp in qm.
3165 * @qp: The qp we want to stop.
3167 * This function is reverse of hisi_qm_start_qp. Return 0 if successful.
3169 int hisi_qm_stop_qp(struct hisi_qp *qp)
3173 down_write(&qp->qm->qps_lock);
3174 ret = qm_stop_qp_nolock(qp);
3175 up_write(&qp->qm->qps_lock);
3179 EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
3182 * hisi_qp_send() - Queue up a task in the hardware queue.
3183 * @qp: The qp in which to put the message.
3184 * @msg: The message.
3186 * This function will return -EBUSY if qp is currently full, and -EAGAIN
3187 * if qp related qm is resetting.
3189 * Note: This function may run with qm_irq_thread and ACC reset at the same
3190 * time. It has no race with qm_irq_thread. However, an ACC reset may happen
3191 * during hisi_qp_send; for performance we hold no lock here. This can make
3192 * the current qm_db fail to send, or the sent sqe never be received. The QM
3193 * sync/async receive functions should handle the error sqe. The ACC reset
3194 * done function should clear the used sqes to 0.
3196 int hisi_qp_send(struct hisi_qp *qp, const void *msg)
3198 struct hisi_qp_status *qp_status = &qp->qp_status;
3199 u16 sq_tail = qp_status->sq_tail;
3200 u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
3201 void *sqe = qm_get_avail_sqe(qp);
3203 if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
3204 atomic_read(&qp->qm->status.flags) == QM_STOP ||
3205 qp->is_resetting)) {
3206 dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
3213 memcpy(sqe, msg, qp->qm->sqe_size);
3215 qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
3216 atomic_inc(&qp->qp_status.used);
3217 qp_status->sq_tail = sq_tail_next;
3221 EXPORT_SYMBOL_GPL(hisi_qp_send);
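/*
* Illustrative sketch (assumed caller, not from this file): send one sqe,
* backing off while the queue is full. struct my_sqe and my_sqe_init()
* are hypothetical stand-ins for an accelerator-specific sqe of
* qm->sqe_size bytes.
*
* struct my_sqe sqe;
* int ret;
*
* my_sqe_init(&sqe);
* do {
* ret = hisi_qp_send(qp, &sqe);
* } while (ret == -EBUSY);
*
* A return of -EAGAIN means the related qm is resetting; the caller
* should retry the request later.
*/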
3223 static void hisi_qm_cache_wb(struct hisi_qm *qm)
3227 if (qm->ver == QM_HW_V1)
3230 writel(0x1, qm->io_base + QM_CACHE_WB_START);
3231 if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
3232 val, val & BIT(0), POLL_PERIOD,
3234 dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
3237 static void qm_qp_event_notifier(struct hisi_qp *qp)
3239 wake_up_interruptible(&qp->uacce_q->wait);
3242 /* This function returns the number of free qps in the qm. */
3243 static int hisi_qm_get_available_instances(struct uacce_device *uacce)
3245 struct hisi_qm *qm = uacce->priv;
3248 down_read(&qm->qps_lock);
3249 ret = qm->qp_num - qm->qp_in_used;
3250 up_read(&qm->qps_lock);
3255 static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
3259 for (i = 0; i < qm->qp_num; i++)
3260 qm_set_qp_disable(&qm->qp_array[i], offset);
3263 static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
3265 struct uacce_queue *q)
3267 struct hisi_qm *qm = uacce->priv;
3271 qp = hisi_qm_create_qp(qm, alg_type);
3278 qp->event_cb = qm_qp_event_notifier;
3280 qp->is_in_kernel = false;
3285 static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
3287 struct hisi_qp *qp = q->priv;
3289 hisi_qm_cache_wb(qp->qm);
3290 hisi_qm_release_qp(qp);
3293 /* map sq/cq/doorbell to user space */
3294 static int hisi_qm_uacce_mmap(struct uacce_queue *q,
3295 struct vm_area_struct *vma,
3296 struct uacce_qfile_region *qfr)
3298 struct hisi_qp *qp = q->priv;
3299 struct hisi_qm *qm = qp->qm;
3300 resource_size_t phys_base = qm->db_phys_base +
3301 qp->qp_id * qm->db_interval;
3302 size_t sz = vma->vm_end - vma->vm_start;
3303 struct pci_dev *pdev = qm->pdev;
3304 struct device *dev = &pdev->dev;
3305 unsigned long vm_pgoff;
3308 switch (qfr->type) {
3309 case UACCE_QFRT_MMIO:
3310 if (qm->ver == QM_HW_V1) {
3311 if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
3313 } else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation) {
3314 if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
3315 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
3318 if (sz > qm->db_interval)
3322 vma->vm_flags |= VM_IO;
3324 return remap_pfn_range(vma, vma->vm_start,
3325 phys_base >> PAGE_SHIFT,
3326 sz, pgprot_noncached(vma->vm_page_prot));
3327 case UACCE_QFRT_DUS:
3328 if (sz != qp->qdma.size)
3332 * dma_mmap_coherent() requires vm_pgoff as 0
3333 * restore vm_pgoff to its initial value for mmap()
3335 vm_pgoff = vma->vm_pgoff;
3337 ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
3339 vma->vm_pgoff = vm_pgoff;
3347 static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
3349 struct hisi_qp *qp = q->priv;
3351 return hisi_qm_start_qp(qp, qp->pasid);
3354 static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
3356 hisi_qm_stop_qp(q->priv);
3359 static int hisi_qm_is_q_updated(struct uacce_queue *q)
3361 struct hisi_qp *qp = q->priv;
3362 struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
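/*
* A cqe is valid for the current pass of the ring while its phase bit
* matches cqc_phase; qm_cq_head_update() flips cqc_phase when the head
* wraps, so this loop stops at the first cqe not yet written by hardware.
*/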
3365 while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
3366 /* make sure to read data from memory */
3368 qm_cq_head_update(qp);
3369 cqe = qp->cqe + qp->qp_status.cq_head;
3376 static void qm_set_sqctype(struct uacce_queue *q, u16 type)
3378 struct hisi_qm *qm = q->uacce->priv;
3379 struct hisi_qp *qp = q->priv;
3381 down_write(&qm->qps_lock);
3382 qp->alg_type = type;
3383 up_write(&qm->qps_lock);
3386 static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
3389 struct hisi_qp *qp = q->priv;
3390 struct hisi_qp_ctx qp_ctx;
3392 if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
3393 if (copy_from_user(&qp_ctx, (void __user *)arg,
3394 sizeof(struct hisi_qp_ctx)))
3397 if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
3400 qm_set_sqctype(q, qp_ctx.qc_type);
3401 qp_ctx.id = qp->qp_id;
3403 if (copy_to_user((void __user *)arg, &qp_ctx,
3404 sizeof(struct hisi_qp_ctx)))
3413 static const struct uacce_ops uacce_qm_ops = {
3414 .get_available_instances = hisi_qm_get_available_instances,
3415 .get_queue = hisi_qm_uacce_get_queue,
3416 .put_queue = hisi_qm_uacce_put_queue,
3417 .start_queue = hisi_qm_uacce_start_queue,
3418 .stop_queue = hisi_qm_uacce_stop_queue,
3419 .mmap = hisi_qm_uacce_mmap,
3420 .ioctl = hisi_qm_uacce_ioctl,
3421 .is_q_updated = hisi_qm_is_q_updated,
3424 static int qm_alloc_uacce(struct hisi_qm *qm)
3426 struct pci_dev *pdev = qm->pdev;
3427 struct uacce_device *uacce;
3428 unsigned long mmio_page_nr;
3429 unsigned long dus_page_nr;
3430 struct uacce_interface interface = {
3431 .flags = UACCE_DEV_SVA,
3432 .ops = &uacce_qm_ops,
3436 ret = strscpy(interface.name, dev_driver_string(&pdev->dev),
3437 sizeof(interface.name));
3439 return -ENAMETOOLONG;
3441 uacce = uacce_alloc(&pdev->dev, &interface);
3443 return PTR_ERR(uacce);
3445 if (uacce->flags & UACCE_DEV_SVA) {
3448 /* only consider sva case */
3449 uacce_remove(uacce);
3454 uacce->is_vf = pdev->is_virtfn;
3456 uacce->algs = qm->algs;
3458 if (qm->ver == QM_HW_V1)
3459 uacce->api_ver = HISI_QM_API_VER_BASE;
3460 else if (qm->ver == QM_HW_V2)
3461 uacce->api_ver = HISI_QM_API_VER2_BASE;
3463 uacce->api_ver = HISI_QM_API_VER3_BASE;
3465 if (qm->ver == QM_HW_V1)
3466 mmio_page_nr = QM_DOORBELL_PAGE_NR;
3467 else if (qm->ver == QM_HW_V2 || !qm->use_db_isolation)
3468 mmio_page_nr = QM_DOORBELL_PAGE_NR +
3469 QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
3471 mmio_page_nr = qm->db_interval / PAGE_SIZE;
3473 /* Add one more page for device or qp status */
3474 dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
3475 sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >>
3478 uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
3479 uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
3487 * qm_frozen() - Try to freeze the QM to cut off continuous queue requests. If
3488 * there is a user on the QM, return failure without doing anything.
3489 * @qm: The qm to be frozen.
3491 * This function freezes the QM, then we can disable SRIOV.
3493 static int qm_frozen(struct hisi_qm *qm)
3495 if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl))
3498 down_write(&qm->qps_lock);
3500 if (!qm->qp_in_used) {
3501 qm->qp_in_used = qm->qp_num;
3502 up_write(&qm->qps_lock);
3503 set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl);
3507 up_write(&qm->qps_lock);
3512 static int qm_try_frozen_vfs(struct pci_dev *pdev,
3513 struct hisi_qm_list *qm_list)
3515 struct hisi_qm *qm, *vf_qm;
3516 struct pci_dev *dev;
3519 if (!qm_list || !pdev)
3522 /* Try to freeze all the VFs before disabling SRIOV */
3523 mutex_lock(&qm_list->lock);
3524 list_for_each_entry(qm, &qm_list->list, list) {
3528 if (pci_physfn(dev) == pdev) {
3529 vf_qm = pci_get_drvdata(dev);
3530 ret = qm_frozen(vf_qm);
3537 mutex_unlock(&qm_list->lock);
3543 * hisi_qm_wait_task_finish() - Wait until the task is finished
3544 * when removing the driver.
3545 * @qm: The qm needed to wait for the task to finish.
3546 * @qm_list: The list of all available devices.
3548 void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
3550 while (qm_frozen(qm) ||
3551 ((qm->fun_type == QM_HW_PF) &&
3552 qm_try_frozen_vfs(qm->pdev, qm_list))) {
3553 msleep(WAIT_PERIOD);
3556 while (test_bit(QM_RST_SCHED, &qm->misc_ctl) ||
3557 test_bit(QM_RESETTING, &qm->misc_ctl))
3558 msleep(WAIT_PERIOD);
3560 udelay(REMOVE_WAIT_DELAY);
3562 EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
3564 static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
3566 struct device *dev = &qm->pdev->dev;
3567 struct qm_dma *qdma;
3570 for (i = num - 1; i >= 0; i--) {
3571 qdma = &qm->qp_array[i].qdma;
3572 dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
3573 kfree(qm->poll_data[i].qp_finish_id);
3576 kfree(qm->poll_data);
3577 kfree(qm->qp_array);
3580 static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
3582 struct device *dev = &qm->pdev->dev;
3583 size_t off = qm->sqe_size * QM_Q_DEPTH;
3587 qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16),
3589 if (!qm->poll_data[id].qp_finish_id)
3592 qp = &qm->qp_array[id];
3593 qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
3596 goto err_free_qp_finish_id;
3598 qp->sqe = qp->qdma.va;
3599 qp->sqe_dma = qp->qdma.dma;
3600 qp->cqe = qp->qdma.va + off;
3601 qp->cqe_dma = qp->qdma.dma + off;
3602 qp->qdma.size = dma_size;
3608 err_free_qp_finish_id:
3609 kfree(qm->poll_data[id].qp_finish_id);
3613 static void hisi_qm_pre_init(struct hisi_qm *qm)
3615 struct pci_dev *pdev = qm->pdev;
3617 if (qm->ver == QM_HW_V1)
3618 qm->ops = &qm_hw_ops_v1;
3619 else if (qm->ver == QM_HW_V2)
3620 qm->ops = &qm_hw_ops_v2;
3622 qm->ops = &qm_hw_ops_v3;
3624 pci_set_drvdata(pdev, qm);
3625 mutex_init(&qm->mailbox_lock);
3626 init_rwsem(&qm->qps_lock);
3628 qm->misc_ctl = false;
3629 if (qm->fun_type == QM_HW_PF && qm->ver > QM_HW_V2) {
3630 if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev)))
3631 dev_info(&pdev->dev, "_PS0 and _PR0 are not defined");
3635 static void qm_cmd_uninit(struct hisi_qm *qm)
3639 if (qm->ver < QM_HW_V3)
3642 val = readl(qm->io_base + QM_IFC_INT_MASK);
3643 val |= QM_IFC_INT_DISABLE;
3644 writel(val, qm->io_base + QM_IFC_INT_MASK);
3647 static void qm_cmd_init(struct hisi_qm *qm)
3651 if (qm->ver < QM_HW_V3)
3654 /* Clear communication interrupt source */
3655 qm_clear_cmd_interrupt(qm, QM_IFC_INT_SOURCE_CLR);
3657 /* Enable pf to vf communication reg. */
3658 val = readl(qm->io_base + QM_IFC_INT_MASK);
3659 val &= ~QM_IFC_INT_DISABLE;
3660 writel(val, qm->io_base + QM_IFC_INT_MASK);
3663 static void qm_put_pci_res(struct hisi_qm *qm)
3665 struct pci_dev *pdev = qm->pdev;
3667 if (qm->use_db_isolation)
3668 iounmap(qm->db_io_base);
3670 iounmap(qm->io_base);
3671 pci_release_mem_regions(pdev);
3674 static void hisi_qm_pci_uninit(struct hisi_qm *qm)
3676 struct pci_dev *pdev = qm->pdev;
3678 pci_free_irq_vectors(pdev);
3680 pci_disable_device(pdev);
3683 static void hisi_qm_set_state(struct hisi_qm *qm, u8 state)
3685 if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF)
3686 writel(state, qm->io_base + QM_VF_STATE);
3689 static void qm_last_regs_uninit(struct hisi_qm *qm)
3691 struct qm_debug *debug = &qm->debug;
3693 if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
3696 kfree(debug->qm_last_words);
3697 debug->qm_last_words = NULL;
3700 static void hisi_qm_uninit_work(struct hisi_qm *qm)
3702 destroy_workqueue(qm->wq);
3705 static void hisi_qm_memory_uninit(struct hisi_qm *qm)
3707 struct device *dev = &qm->pdev->dev;
3709 hisi_qp_memory_uninit(qm, qm->qp_num);
3711 hisi_qm_cache_wb(qm);
3712 dma_free_coherent(dev, qm->qdma.size,
3713 qm->qdma.va, qm->qdma.dma);
3716 idr_destroy(&qm->qp_idr);
3721 * hisi_qm_uninit() - Uninitialize qm.
3722 * @qm: The qm to be uninitialized.
3724 * This function uninits qm related device resources.
3726 void hisi_qm_uninit(struct hisi_qm *qm)
3728 qm_last_regs_uninit(qm);
3731 hisi_qm_uninit_work(qm);
3732 down_write(&qm->qps_lock);
3734 if (!qm_avail_state(qm, QM_CLOSE)) {
3735 up_write(&qm->qps_lock);
3739 hisi_qm_memory_uninit(qm);
3740 hisi_qm_set_state(qm, QM_NOT_READY);
3741 up_write(&qm->qps_lock);
3743 qm_irq_unregister(qm);
3744 hisi_qm_pci_uninit(qm);
3746 uacce_remove(qm->uacce);
3750 EXPORT_SYMBOL_GPL(hisi_qm_uninit);
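/*
* Illustrative remove-path sketch (assumed driver code, not from this
* file): the usual ordering in an accelerator driver's .remove() is to
* wait for in-flight tasks, stop the qm, then uninitialize it.
* "my_qm_list" is a hypothetical hisi_qm_list owned by the driver.
*
* hisi_qm_wait_task_finish(qm, &my_qm_list);
* hisi_qm_stop(qm, QM_NORMAL);
* hisi_qm_uninit(qm);
*/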
3753 * hisi_qm_get_vft() - Get vft from a qm.
3754 * @qm: The qm we want to get its vft.
3755 * @base: The base number of queue in vft.
3756 * @number: The number of queues in vft.
3758 * We can allocate multiple queues to a qm by configuring the virtual function
3759 * table. We get the related configuration by this function. Normally, we call
3760 * this function in the VF driver to get the queue information.
3762 * qm hw v1 does not support this interface.
3764 static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
3766 if (!base || !number)
3769 if (!qm->ops->get_vft) {
3770 dev_err(&qm->pdev->dev, "Don't support vft read!\n");
3774 return qm->ops->get_vft(qm, base, number);
3778 * hisi_qm_set_vft() - Set vft to a qm.
3779 * @qm: The qm we want to set its vft.
3780 * @fun_num: The function number.
3781 * @base: The base number of queue in vft.
3782 * @number: The number of queues in vft.
3784 * This function is always called in the PF driver; it is used to assign queues
3787 * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
3788 * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
3789 * (VF function number 0x2)
3791 static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
3794 u32 max_q_num = qm->ctrl_qp_num;
3796 if (base >= max_q_num || number > max_q_num ||
3797 (base + number) > max_q_num)
3800 return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
3803 static void qm_init_eq_aeq_status(struct hisi_qm *qm)
3805 struct hisi_qm_status *status = &qm->status;
3807 status->eq_head = 0;
3808 status->aeq_head = 0;
3809 status->eqc_phase = true;
3810 status->aeqc_phase = true;
3813 static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm)
3815 /* Clear eq/aeq interrupt source */
3816 qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
3817 qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
3819 writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
3820 writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
3823 static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm)
3825 writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
3826 writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
3829 static int qm_eq_ctx_cfg(struct hisi_qm *qm)
3831 struct device *dev = &qm->pdev->dev;
3836 eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
3840 eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
3841 eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
3842 if (qm->ver == QM_HW_V1)
3843 eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
3844 eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
3846 eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
3848 if (dma_mapping_error(dev, eqc_dma)) {
3853 ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
3854 dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
3860 static int qm_aeq_ctx_cfg(struct hisi_qm *qm)
3862 struct device *dev = &qm->pdev->dev;
3863 struct qm_aeqc *aeqc;
3864 dma_addr_t aeqc_dma;
3867 aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
3871 aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
3872 aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
3873 aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
3875 aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
3877 if (dma_mapping_error(dev, aeqc_dma)) {
3882 ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
3883 dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
3889 static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm)
3891 struct device *dev = &qm->pdev->dev;
3894 qm_init_eq_aeq_status(qm);
3896 ret = qm_eq_ctx_cfg(qm);
3898 dev_err(dev, "Set eqc failed!\n");
3902 return qm_aeq_ctx_cfg(qm);
3905 static int __hisi_qm_start(struct hisi_qm *qm)
3909 WARN_ON(!qm->qdma.va);
3911 if (qm->fun_type == QM_HW_PF) {
3912 ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
3917 ret = qm_eq_aeq_ctx_cfg(qm);
3921 ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
3925 ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
3929 qm_init_prefetch(qm);
3930 qm_enable_eq_aeq_interrupts(qm);
3936 * hisi_qm_start() - start qm
3937 * @qm: The qm to be started.
3939 * This function starts a qm, after which we can allocate qps from it.
3941 int hisi_qm_start(struct hisi_qm *qm)
3943 struct device *dev = &qm->pdev->dev;
3946 down_write(&qm->qps_lock);
3948 if (!qm_avail_state(qm, QM_START)) {
3949 up_write(&qm->qps_lock);
3953 dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num);
3956 dev_err(dev, "qp_num should not be 0\n");
3961 ret = __hisi_qm_start(qm);
3963 atomic_set(&qm->status.flags, QM_START);
3965 hisi_qm_set_state(qm, QM_READY);
3967 up_write(&qm->qps_lock);
3970 EXPORT_SYMBOL_GPL(hisi_qm_start);
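/*
* Illustrative probe-path sketch (assumed driver code): after
* hisi_qm_init() and device-specific setup have filled in qp_base and
* qp_num, the driver starts the qm before creating any qps.
*
* ret = hisi_qm_init(qm);
* if (ret)
* return ret;
*
* ret = hisi_qm_start(qm);
* if (ret)
* hisi_qm_uninit(qm);
*/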
3972 static int qm_restart(struct hisi_qm *qm)
3974 struct device *dev = &qm->pdev->dev;
3978 ret = hisi_qm_start(qm);
3982 down_write(&qm->qps_lock);
3983 for (i = 0; i < qm->qp_num; i++) {
3984 qp = &qm->qp_array[i];
3985 if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
3986 qp->is_resetting) {
3987 ret = qm_start_qp_nolock(qp, 0);
3989 dev_err(dev, "Failed to start qp%d!\n", i);
3991 up_write(&qm->qps_lock);
3994 qp->is_resetting = false;
3997 up_write(&qm->qps_lock);
4002 /* Stop started qps in reset flow */
4003 static int qm_stop_started_qp(struct hisi_qm *qm)
4005 struct device *dev = &qm->pdev->dev;
4009 for (i = 0; i < qm->qp_num; i++) {
4010 qp = &qm->qp_array[i];
4011 if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
4012 qp->is_resetting = true;
4013 ret = qm_stop_qp_nolock(qp);
4015 dev_err(dev, "Failed to stop qp%d!\n", i);
4026 * qm_clear_queues() - Clear all queue memory in a qm.
4027 * @qm: The qm in which the queues will be cleared.
4029 * This function clears all queue memory in a qm. A reset of the accelerator
4030 * can use this to clear the queues.
4032 static void qm_clear_queues(struct hisi_qm *qm)
4037 for (i = 0; i < qm->qp_num; i++) {
4038 qp = &qm->qp_array[i];
4039 if (qp->is_in_kernel && qp->is_resetting)
4040 memset(qp->qdma.va, 0, qp->qdma.size);
4043 memset(qm->qdma.va, 0, qm->qdma.size);
4047 * hisi_qm_stop() - Stop a qm.
4048 * @qm: The qm which will be stopped.
4049 * @r: The reason to stop qm.
4051 * This function stops the qm and its qps, after which the qm can not accept
4052 * requests. Related resources are not released in this state; we can use
4053 * hisi_qm_start to let the qm start again.
4055 int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
4057 struct device *dev = &qm->pdev->dev;
4060 down_write(&qm->qps_lock);
4062 qm->status.stop_reason = r;
4063 if (!qm_avail_state(qm, QM_STOP)) {
4068 if (qm->status.stop_reason == QM_SOFT_RESET ||
4069 qm->status.stop_reason == QM_FLR) {
4070 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
4071 ret = qm_stop_started_qp(qm);
4073 dev_err(dev, "Failed to stop started qp!\n");
4076 hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
4079 qm_disable_eq_aeq_interrupts(qm);
4080 if (qm->fun_type == QM_HW_PF) {
4081 ret = hisi_qm_set_vft(qm, 0, 0, 0);
4083 dev_err(dev, "Failed to set vft!\n");
4089 qm_clear_queues(qm);
4090 atomic_set(&qm->status.flags, QM_STOP);
4093 up_write(&qm->qps_lock);
4096 EXPORT_SYMBOL_GPL(hisi_qm_stop);
4098 static ssize_t qm_status_read(struct file *filp, char __user *buffer,
4099 size_t count, loff_t *pos)
4101 struct hisi_qm *qm = filp->private_data;
4102 char buf[QM_DBG_READ_LEN];
4105 val = atomic_read(&qm->status.flags);
4106 len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
4108 return simple_read_from_buffer(buffer, count, pos, buf, len);
4111 static const struct file_operations qm_status_fops = {
4112 .owner = THIS_MODULE,
4113 .open = simple_open,
4114 .read = qm_status_read,
4117 static int qm_debugfs_atomic64_set(void *data, u64 val)
4122 atomic64_set((atomic64_t *)data, 0);
4127 static int qm_debugfs_atomic64_get(void *data, u64 *val)
4129 *val = atomic64_read((atomic64_t *)data);
4134 DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
4135 qm_debugfs_atomic64_set, "%llu\n");
4137 static void qm_hw_error_init(struct hisi_qm *qm)
4139 struct hisi_qm_err_info *err_info = &qm->err_info;
4141 if (!qm->ops->hw_error_init) {
4142 dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
4146 qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
4149 static void qm_hw_error_uninit(struct hisi_qm *qm)
4151 if (!qm->ops->hw_error_uninit) {
4152 dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
4156 qm->ops->hw_error_uninit(qm);
4159 static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
4161 if (!qm->ops->hw_error_handle) {
4162 dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
4163 return ACC_ERR_NONE;
4166 return qm->ops->hw_error_handle(qm);
4170 * hisi_qm_dev_err_init() - Initialize device error configuration.
4171 * @qm: The qm for which we want to do error initialization.
4173 * Initialize QM and device error related configuration.
4175 void hisi_qm_dev_err_init(struct hisi_qm *qm)
4177 if (qm->fun_type == QM_HW_VF)
4180 qm_hw_error_init(qm);
4182 if (!qm->err_ini->hw_err_enable) {
4183 dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
4186 qm->err_ini->hw_err_enable(qm);
4188 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
4191 * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
4192 * @qm: The qm for which we want to do error uninitialization.
4194 * Uninitialize QM and device error related configuration.
4196 void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
4198 if (qm->fun_type == QM_HW_VF)
4201 qm_hw_error_uninit(qm);
4203 if (!qm->err_ini->hw_err_disable) {
4204 dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
4207 qm->err_ini->hw_err_disable(qm);
4209 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
4212 * hisi_qm_free_qps() - free multiple queue pairs.
4213 * @qps: The queue pairs that need to be freed.
4214 * @qp_num: The number of queue pairs.
4216 void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
4220 if (!qps || qp_num <= 0)
4223 for (i = qp_num - 1; i >= 0; i--)
4224 hisi_qm_release_qp(qps[i]);
4226 EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
4228 static void free_list(struct list_head *head)
4230 struct hisi_qm_resource *res, *tmp;
4232 list_for_each_entry_safe(res, tmp, head, list) {
4233 list_del(&res->list);
4238 static int hisi_qm_sort_devices(int node, struct list_head *head,
4239 struct hisi_qm_list *qm_list)
4241 struct hisi_qm_resource *res, *tmp;
4243 struct list_head *n;
4247 list_for_each_entry(qm, &qm_list->list, list) {
4248 dev = &qm->pdev->dev;
4250 if (IS_ENABLED(CONFIG_NUMA)) {
4251 dev_node = dev_to_node(dev);
4256 res = kzalloc(sizeof(*res), GFP_KERNEL);
4261 res->distance = node_distance(dev_node, node);
4263 list_for_each_entry(tmp, head, list) {
4264 if (res->distance < tmp->distance) {
4269 list_add_tail(&res->list, n);
4276 * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
4277 * @qm_list: The list of all available devices.
4278 * @qp_num: The number of queue pairs to be created.
4279 * @alg_type: The algorithm type.
4280 * @node: The numa node.
4281 * @qps: The queue pairs to be created.
4283 * This function will sort all available devices according to numa distance,
4284 * then try to create all queue pairs from one device; if no device
4285 * meets the requirements, it will return an error.
4287 int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
4288 u8 alg_type, int node, struct hisi_qp **qps)
4290 struct hisi_qm_resource *tmp;
4295 if (!qps || !qm_list || qp_num <= 0)
4298 mutex_lock(&qm_list->lock);
4299 if (hisi_qm_sort_devices(node, &head, qm_list)) {
4300 mutex_unlock(&qm_list->lock);
4304 list_for_each_entry(tmp, &head, list) {
4305 for (i = 0; i < qp_num; i++) {
4306 qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
4307 if (IS_ERR(qps[i])) {
4308 hisi_qm_free_qps(qps, i);
4319 mutex_unlock(&qm_list->lock);
4321 pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n",
4322 node, alg_type, qp_num);
4328 EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
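/*
* Illustrative sketch (assumed caller): allocate two qps near the
* caller's NUMA node and release them with hisi_qm_free_qps().
* "my_qm_list" is a hypothetical hisi_qm_list owned by the accelerator
* driver; the alg_type of 0 is an arbitrary device-specific value.
*
* struct hisi_qp *qps[2];
* int ret;
*
* ret = hisi_qm_alloc_qps_node(&my_qm_list, 2, 0,
* dev_to_node(&pdev->dev), qps);
* if (ret)
* return ret;
* ...
* hisi_qm_free_qps(qps, 2);
*/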
4330 static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
4332 u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j;
4333 u32 max_qp_num = qm->max_qp_num;
4334 u32 q_base = qm->qp_num;
4340 vfs_q_num = qm->ctrl_qp_num - qm->qp_num;
4342 /* If vfs_q_num is less than num_vfs, return error. */
4343 if (vfs_q_num < num_vfs)
4346 q_num = vfs_q_num / num_vfs;
4347 remain_q_num = vfs_q_num % num_vfs;
4349 for (i = num_vfs; i > 0; i--) {
4351 * if q_num + remain_q_num > max_qp_num in last vf, divide the
4352 * remaining queues equally.
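*
* For example (illustrative numbers only): with vfs_q_num = 10 and
* num_vfs = 3, q_num = 3 and remain_q_num = 1. The first VF walked
* (i == num_vfs) takes 3 + 1 = 4 queues since that fits in max_qp_num,
* and the remaining two VFs take 3 queues each.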
4354 if (i == num_vfs && q_num + remain_q_num <= max_qp_num) {
4355 act_q_num = q_num + remain_q_num;
4357 } else if (remain_q_num > 0) {
4358 act_q_num = q_num + 1;
4364 act_q_num = min_t(int, act_q_num, max_qp_num);
4365 ret = hisi_qm_set_vft(qm, i, q_base, act_q_num);
4367 for (j = num_vfs; j > i; j--)
4368 hisi_qm_set_vft(qm, j, 0, 0);
4371 q_base += act_q_num;
4377 static int qm_clear_vft_config(struct hisi_qm *qm)
4382 for (i = 1; i <= qm->vfs_num; i++) {
4383 ret = hisi_qm_set_vft(qm, i, 0, 0);
4392 static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos)
4394 struct device *dev = &qm->pdev->dev;
4395 u32 ir = qos * QM_QOS_RATE;
4396 int ret, total_vfs, i;
4398 total_vfs = pci_sriov_get_totalvfs(qm->pdev);
4399 if (fun_index > total_vfs)
4402 qm->factor[fun_index].func_qos = qos;
4404 ret = qm_get_shaper_para(ir, &qm->factor[fun_index]);
4406 dev_err(dev, "failed to calculate shaper parameter!\n");
4410 for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) {
4411 /* The base number of queue reuse for different alg type */
4412 ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1);
4414 dev_err(dev, "type: %d, failed to set shaper vft!\n", i);
4422 static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index)
4424 u64 cir_u = 0, cir_b = 0, cir_s = 0;
4425 u64 shaper_vft, ir_calc, ir;
4430 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
4431 val & BIT(0), POLL_PERIOD,
4436 writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR);
4437 writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE);
4438 writel(fun_index, qm->io_base + QM_VFT_CFG);
4440 writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
4441 writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);
4443 ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
4444 val & BIT(0), POLL_PERIOD,
4449 shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) |
4450 ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32);
4452 cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK;
4453 cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK;
4454 cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT;
4456 cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK;
4457 cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT;
4459 ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s);
4461 ir = qm->factor[fun_index].func_qos * QM_QOS_RATE;
4463 error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir;
4464 if (error_rate > QM_QOS_MIN_ERROR_RATE) {
4465 pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate);
4472 static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num)
4474 struct device *dev = &qm->pdev->dev;
4479 qos = qm_get_shaper_vft_qos(qm, fun_num);
4481 dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num);
4485 mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT;
4486 ret = qm_ping_single_vf(qm, mb_cmd, fun_num);
4488 dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num);
4491 static int qm_vf_read_qos(struct hisi_qm *qm)
4496 /* reset mailbox qos val */
4499 /* vf ping pf to get function qos */
4500 if (qm->ops->ping_pf) {
4501 ret = qm->ops->ping_pf(qm, QM_VF_GET_QOS);
4503 pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n");
4509 msleep(QM_WAIT_DST_ACK);
4513 if (++cnt > QM_MAX_VF_WAIT_COUNT) {
4514 pci_err(qm->pdev, "PF ping VF timeout!\n");
4522 static ssize_t qm_algqos_read(struct file *filp, char __user *buf,
4523 size_t count, loff_t *pos)
4525 struct hisi_qm *qm = filp->private_data;
4526 char tbuf[QM_DBG_READ_LEN];
4530 ret = hisi_qm_get_dfx_access(qm);
4534 /* Mailbox and reset cannot be operated at the same time */
4535 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
4536 pci_err(qm->pdev, "dev resetting, read alg qos failed!\n");
4538 goto err_put_dfx_access;
4541 if (qm->fun_type == QM_HW_PF) {
4542 ir = qm_get_shaper_vft_qos(qm, 0);
4544 ret = qm_vf_read_qos(qm);
4546 goto err_get_status;
4550 qos_val = ir / QM_QOS_RATE;
4551 ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val);
4553 ret = simple_read_from_buffer(buf, count, pos, tbuf, ret);
4556 clear_bit(QM_RESETTING, &qm->misc_ctl);
4558 hisi_qm_put_dfx_access(qm);
4562 static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
4564 int buflen = strlen(buf);
4567 for (i = 0; i < buflen; i++) {
4568 if (!isdigit(buf[i]))
4572 ret = sscanf(buf, "%lu", val);
4573 if (ret != QM_QOS_VAL_NUM)
4579 static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf,
4581 unsigned int *fun_index)
4583 char tbuf_bdf[QM_DBG_READ_LEN] = {0};
4584 char val_buf[QM_QOS_VAL_MAX_LEN] = {0};
4585 u32 tmp1, device, function;
4588 ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf);
4589 if (ret != QM_QOS_PARAM_NUM)
4592 ret = qm_qos_value_init(val_buf, val);
4593 if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) {
4594 pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n");
4598 ret = sscanf(tbuf_bdf, "%u:%x:%u.%u", &tmp1, &bus, &device, &function);
4599 if (ret != QM_QOS_BDF_PARAM_NUM) {
4600 pci_err(qm->pdev, "input pci bdf value is error!\n");
4604 *fun_index = PCI_DEVFN(device, function);
4609 static ssize_t qm_algqos_write(struct file *filp, const char __user *buf,
4610 size_t count, loff_t *pos)
4612 struct hisi_qm *qm = filp->private_data;
4613 char tbuf[QM_DBG_READ_LEN];
4614 unsigned int fun_index;
4618 if (qm->fun_type == QM_HW_VF)
4624 if (count >= QM_DBG_READ_LEN)
4627 len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count);
4632 ret = qm_get_qos_value(qm, tbuf, &val, &fun_index);
4636 /* Mailbox and reset cannot be operated at the same time */
4637 if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) {
4638 pci_err(qm->pdev, "dev resetting, write alg qos failed!\n");
4642 ret = qm_pm_get_sync(qm);
4645 goto err_get_status;
4648 ret = qm_func_shaper_enable(qm, fun_index, val);
4650 pci_err(qm->pdev, "failed to enable function shaper!\n");
4655 pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n",
4662 clear_bit(QM_RESETTING, &qm->misc_ctl);
4666 static const struct file_operations qm_algqos_fops = {
4667 .owner = THIS_MODULE,
4668 .open = simple_open,
4669 .read = qm_algqos_read,
4670 .write = qm_algqos_write,
4674 * hisi_qm_set_algqos_init() - Initialize function qos debugfs files.
4675 * @qm: The qm for which we want to add debugfs files.
4677 * Create function qos debugfs files.
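*
* Illustrative usage (paths and values are examples, not taken from this
* file): on the PF, the qos of a function is set by writing its BDF and a
* value in the 1~1000 range to the file, and read back with a plain read:
*
* echo "0000:81:00.0 500" > <debugfs>/<device>/alg_qos
* cat <debugfs>/<device>/alg_qos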
4679 static void hisi_qm_set_algqos_init(struct hisi_qm *qm)
4681 if (qm->fun_type == QM_HW_PF)
4682 debugfs_create_file("alg_qos", 0644, qm->debug.debug_root,
4683 qm, &qm_algqos_fops);
4685 debugfs_create_file("alg_qos", 0444, qm->debug.debug_root,
4686 qm, &qm_algqos_fops);
4690 * hisi_qm_debug_init() - Initialize qm related debugfs files.
4691 * @qm: The qm for which we want to add debugfs files.
4693 * Create qm related debugfs files.
4695 void hisi_qm_debug_init(struct hisi_qm *qm)
4697 struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs;
4698 struct qm_dfx *dfx = &qm->debug.dfx;
4699 struct dentry *qm_d;
4703 qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
4704 qm->debug.qm_d = qm_d;
4706 /* only show this in PF */
4707 if (qm->fun_type == QM_HW_PF) {
4708 qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM);
4709 for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
4710 qm_create_debugfs_file(qm, qm->debug.qm_d, i);
4714 debugfs_create_file("diff_regs", 0444, qm->debug.qm_d,
4715 qm, &qm_diff_regs_fops);
4717 debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
4719 debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops);
4721 debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
4723 for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
4724 data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
4725 debugfs_create_file(qm_dfx_files[i].name,
4732 if (qm->ver >= QM_HW_V3)
4733 hisi_qm_set_algqos_init(qm);
4735 EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
4738 * hisi_qm_debug_regs_clear() - clear qm debug related registers.
4739 * @qm: The qm for which we want to clear its debug registers.
4741 void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
4743 const struct debugfs_reg32 *regs;
4746 /* clear current_qm */
4747 writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF);
4748 writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF);
4750 /* clear current_q */
4751 writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
4752 writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
4755 * these registers are read-and-cleared, so clear them after reading them.
4758 writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
4761 for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
4762 readl(qm->io_base + regs->offset);
4766 /* clear clear_enable */
4767 writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
4769 EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
4772 * hisi_qm_sriov_enable() - enable virtual functions
4773 * @pdev: the PCIe device
4774 * @max_vfs: the number of virtual functions to enable
4776 * Returns the number of enabled VFs. If there are VFs enabled already, or
4777 * max_vfs is more than the total number the device can enable, returns failure.
4780 int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
4782 struct hisi_qm *qm = pci_get_drvdata(pdev);
4783 int pre_existing_vfs, num_vfs, total_vfs, ret;
4785 ret = qm_pm_get_sync(qm);
4789 total_vfs = pci_sriov_get_totalvfs(pdev);
4790 pre_existing_vfs = pci_num_vf(pdev);
4791 if (pre_existing_vfs) {
4792 pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
4797 num_vfs = min_t(int, max_vfs, total_vfs);
4798 ret = qm_vf_q_assign(qm, num_vfs);
4800 pci_err(pdev, "Can't assign queues for VF!\n");
4804 qm->vfs_num = num_vfs;
4806 ret = pci_enable_sriov(pdev, num_vfs);
4808 pci_err(pdev, "Can't enable VF!\n");
4809 qm_clear_vft_config(qm);
4813 pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
4821 EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
4824 * hisi_qm_sriov_disable - disable virtual functions
4825 * @pdev: the PCI device.
4826 * @is_frozen: true when all the VFs are frozen.
4828 * Return failure if there are VFs assigned already or a VF is in use.
4830 int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
4832 struct hisi_qm *qm = pci_get_drvdata(pdev);
4833 int total_vfs = pci_sriov_get_totalvfs(qm->pdev);
4836 if (pci_vfs_assigned(pdev)) {
4837 pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
4841 /* While a VF is in use, SRIOV cannot be disabled. */
4842 if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
4843 pci_err(pdev, "Task is using its VF!\n");
4847 pci_disable_sriov(pdev);
4848 /* clear the vf function shaper configuration array */
4849 memset(qm->factor + 1, 0, sizeof(struct qm_shaper_factor) * total_vfs);
4850 ret = qm_clear_vft_config(qm);
4858 EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
4861 * hisi_qm_sriov_configure - configure the number of VFs
4862 * @pdev: The PCI device
4863 * @num_vfs: The number of VFs to be enabled
4865 * Enable SR-IOV according to num_vfs, 0 means disable.
4867 int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
4870 return hisi_qm_sriov_disable(pdev, false);
4872 return hisi_qm_sriov_enable(pdev, num_vfs);
4874 EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
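/*
* Illustrative sketch (assumed driver code): wiring SR-IOV configuration
* into a PCI driver so that writing sriov_numvfs in sysfs lands here.
* The driver name and the callbacks other than sriov_configure are
* hypothetical.
*
* static struct pci_driver my_acc_driver = {
* .name = "my_acc",
* .id_table = my_acc_dev_ids,
* .probe = my_acc_probe,
* .remove = my_acc_remove,
* .sriov_configure = hisi_qm_sriov_configure,
* };
*/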
4876 static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
4880 if (!qm->err_ini->get_dev_hw_err_status) {
4881 dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
4882 return ACC_ERR_NONE;
4885 /* get device hardware error status */
4886 err_sts = qm->err_ini->get_dev_hw_err_status(qm);
4888 if (err_sts & qm->err_info.ecc_2bits_mask)
4889 qm->err_status.is_dev_ecc_mbit = true;
4891 if (qm->err_ini->log_dev_hw_err)
4892 qm->err_ini->log_dev_hw_err(qm, err_sts);
4894 /* ce error does not need to be reset */
4895 if ((err_sts | qm->err_info.dev_ce_mask) ==
4896 qm->err_info.dev_ce_mask) {
4897 if (qm->err_ini->clear_dev_hw_err_status)
4898 qm->err_ini->clear_dev_hw_err_status(qm,
4901 return ACC_ERR_RECOVERED;
4904 return ACC_ERR_NEED_RESET;
4907 return ACC_ERR_RECOVERED;
4910 static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
4912 enum acc_err_result qm_ret, dev_ret;
4915 qm_ret = qm_hw_error_handle(qm);
4917 /* log device error */
4918 dev_ret = qm_dev_err_handle(qm);
4920 return (qm_ret == ACC_ERR_NEED_RESET ||
4921 dev_ret == ACC_ERR_NEED_RESET) ?
4922 ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
4926 * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
4927 * @pdev: The PCI device which needs to report the error.
4928 * @state: The connectivity between CPU and device.
4930 * We register this function into the PCIe AER handlers. It will report the
4931 * device or qm hardware error status when an error occurs.
4933 pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
4934 pci_channel_state_t state)
4936 struct hisi_qm *qm = pci_get_drvdata(pdev);
4937 enum acc_err_result ret;
4939 if (pdev->is_virtfn)
4940 return PCI_ERS_RESULT_NONE;
4942 pci_info(pdev, "PCI error detected, state(=%u)!!\n", state);
4943 if (state == pci_channel_io_perm_failure)
4944 return PCI_ERS_RESULT_DISCONNECT;
4946 ret = qm_process_dev_error(qm);
4947 if (ret == ACC_ERR_NEED_RESET)
4948 return PCI_ERS_RESULT_NEED_RESET;
4950 return PCI_ERS_RESULT_RECOVERED;
4952 EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
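/*
* Illustrative sketch (assumed driver code): a QM-based driver typically
* plugs the exported helpers straight into its struct pci_error_handlers;
* hisi_qm_dev_slot_reset, hisi_qm_reset_prepare and hisi_qm_reset_done
* are exported elsewhere in this file.
*
* static const struct pci_error_handlers my_acc_err_handler = {
* .error_detected = hisi_qm_dev_err_detected,
* .slot_reset = hisi_qm_dev_slot_reset,
* .reset_prepare = hisi_qm_reset_prepare,
* .reset_done = hisi_qm_reset_done,
* };
*/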
4954 static int qm_check_req_recv(struct hisi_qm *qm)
4956 struct pci_dev *pdev = qm->pdev;
4960 if (qm->ver >= QM_HW_V3)
4963 writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
4964 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4965 (val == ACC_VENDOR_ID_VALUE),
4966 POLL_PERIOD, POLL_TIMEOUT);
4968 dev_err(&pdev->dev, "Fails to read QM reg!\n");
4972 writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
4973 ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
4974 (val == PCI_VENDOR_ID_HUAWEI),
4975 POLL_PERIOD, POLL_TIMEOUT);
4977 dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n");
4982 static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
4984 struct pci_dev *pdev = qm->pdev;
4988 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
4990 cmd |= PCI_COMMAND_MEMORY;
4992 cmd &= ~PCI_COMMAND_MEMORY;
4994 pci_write_config_word(pdev, PCI_COMMAND, cmd);
4995 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
4996 pci_read_config_word(pdev, PCI_COMMAND, &cmd);
4997 if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
5006 static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
5008 struct pci_dev *pdev = qm->pdev;
5013 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
5014 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
5016 sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
5018 sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
5019 pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
5021 for (i = 0; i < MAX_WAIT_COUNTS; i++) {
5022 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
5023 if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
5024 ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
5033 static int qm_vf_reset_prepare(struct hisi_qm *qm,
5034 enum qm_stop_reason stop_reason)
5036 struct hisi_qm_list *qm_list = qm->qm_list;
5037 struct pci_dev *pdev = qm->pdev;
5038 struct pci_dev *virtfn;
5039 struct hisi_qm *vf_qm;
5042 mutex_lock(&qm_list->lock);
5043 list_for_each_entry(vf_qm, &qm_list->list, list) {
5044 virtfn = vf_qm->pdev;
5048 if (pci_physfn(virtfn) == pdev) {
5049 /* save VFs PCIE BAR configuration */
5050 pci_save_state(virtfn);
5052 ret = hisi_qm_stop(vf_qm, stop_reason);
5059 mutex_unlock(&qm_list->lock);
5063 static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd,
5064 enum qm_stop_reason stop_reason)
5066 struct pci_dev *pdev = qm->pdev;
5072 /* Kunpeng930 supports notifying the VFs to stop before a PF reset */
5073 if (qm->ops->ping_all_vfs) {
5074 ret = qm->ops->ping_all_vfs(qm, cmd);
5076 pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n");
5078 ret = qm_vf_reset_prepare(qm, stop_reason);
5080 pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret);
5086 static int qm_controller_reset_prepare(struct hisi_qm *qm)
5088 struct pci_dev *pdev = qm->pdev;
5091 ret = qm_reset_prepare_ready(qm);
5093 pci_err(pdev, "Controller reset not ready!\n");
5097 /* PF obtains the information of VF by querying the register. */
5100 /* Whether or not the VFs stop successfully, the soft reset will continue. */
5101 ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET);
5103 pci_err(pdev, "failed to stop vfs by pf in soft reset.\n");
5105 ret = hisi_qm_stop(qm, QM_SOFT_RESET);
5107 pci_err(pdev, "Fails to stop QM!\n");
5108 qm_reset_bit_clear(qm);
5112 ret = qm_wait_vf_prepare_finish(qm);
5114 pci_err(pdev, "failed to stop by vfs in soft reset!\n");
5116 clear_bit(QM_RST_SCHED, &qm->misc_ctl);
5121 static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
5125 /* Kunpeng930 hardware automatically closes the master OOO when an NFE occurs */
5126 if (qm->ver >= QM_HW_V3)
5129 if (!qm->err_status.is_dev_ecc_mbit &&
5130 qm->err_status.is_qm_ecc_mbit &&
5131 qm->err_ini->close_axi_master_ooo) {
5133 qm->err_ini->close_axi_master_ooo(qm);
5135 } else if (qm->err_status.is_dev_ecc_mbit &&
5136 !qm->err_status.is_qm_ecc_mbit &&
5137 !qm->err_ini->close_axi_master_ooo) {
5139 nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
5140 writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
5141 qm->io_base + QM_RAS_NFE_ENABLE);
5142 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
5146 static int qm_soft_reset(struct hisi_qm *qm)
5148 struct pci_dev *pdev = qm->pdev;
5152 /* Ensure all doorbells and mailboxes received by QM */
5153 ret = qm_check_req_recv(qm);
5158 ret = qm_set_vf_mse(qm, false);
5160 pci_err(pdev, "Fails to disable vf MSE bit.\n");
5165 ret = qm->ops->set_msi(qm, false);
5167 pci_err(pdev, "Fails to disable PEH MSI bit.\n");
5171 qm_dev_ecc_mbit_handle(qm);
5173 /* OOO register set and check */
5174 writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
5175 qm->io_base + ACC_MASTER_GLOBAL_CTRL);
5177 /* If bus lock, reset chip */
5178 ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
5180 (val == ACC_MASTER_TRANS_RETURN_RW),
5181 POLL_PERIOD, POLL_TIMEOUT);
5183 pci_emerg(pdev, "Bus lock! Please reset system.\n");
5187 if (qm->err_ini->close_sva_prefetch)
5188 qm->err_ini->close_sva_prefetch(qm);
5190 ret = qm_set_pf_mse(qm, false);
5192 pci_err(pdev, "Fails to disable pf MSE bit.\n");
5196 /* The reset related sub-control registers are not in PCI BAR */
5197 if (ACPI_HANDLE(&pdev->dev)) {
5198 unsigned long long value = 0;
5201 s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
5202 qm->err_info.acpi_rst,
5204 if (ACPI_FAILURE(s)) {
5205 pci_err(pdev, "NO controller reset method!\n");
5210 pci_err(pdev, "Reset step %llu failed!\n", value);
5214 pci_err(pdev, "No reset method!\n");
5221 static int qm_vf_reset_done(struct hisi_qm *qm)
5223 struct hisi_qm_list *qm_list = qm->qm_list;
5224 struct pci_dev *pdev = qm->pdev;
5225 struct pci_dev *virtfn;
5226 struct hisi_qm *vf_qm;
5229 mutex_lock(&qm_list->lock);
5230 list_for_each_entry(vf_qm, &qm_list->list, list) {
5231 virtfn = vf_qm->pdev;
5235 if (pci_physfn(virtfn) == pdev) {
5236 /* restore VFs PCIE BAR configuration */
5237 pci_restore_state(virtfn);
5239 ret = qm_restart(vf_qm);
5246 mutex_unlock(&qm_list->lock);
5250 static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd)
5252 struct pci_dev *pdev = qm->pdev;
5258 ret = qm_vf_q_assign(qm, qm->vfs_num);
5260 pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret);
5264 /* Kunpeng930 supports notifying the VFs to start after a PF reset. */
5265 if (qm->ops->ping_all_vfs) {
5266 ret = qm->ops->ping_all_vfs(qm, cmd);
5268 pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n");
5270 ret = qm_vf_reset_done(qm);
5272 pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret);
5278 static int qm_dev_hw_init(struct hisi_qm *qm)
5280 return qm->err_ini->hw_init(qm);
5283 static void qm_restart_prepare(struct hisi_qm *qm)
5287 if (qm->err_ini->open_sva_prefetch)
5288 qm->err_ini->open_sva_prefetch(qm);
5290 if (qm->ver >= QM_HW_V3)
5293 if (!qm->err_status.is_qm_ecc_mbit &&
5294 !qm->err_status.is_dev_ecc_mbit)
5297 /* temporarily close the OOO port used for PEH to write out MSI */
5298 value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
5299 writel(value & ~qm->err_info.msi_wr_port,
5300 qm->io_base + ACC_AM_CFG_PORT_WR_EN);
5302 /* clear the dev ecc 2bit error source if present */
5303 value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
5304 if (value && qm->err_ini->clear_dev_hw_err_status)
5305 qm->err_ini->clear_dev_hw_err_status(qm, value);
5307 /* clear QM ecc mbit error source */
5308 writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
5310 /* clear AM Reorder Buffer ecc mbit source */
5311 writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
static void qm_restart_done(struct hisi_qm *qm)
{
	u32 value;

	if (qm->ver >= QM_HW_V3)
		goto clear_flags;

	if (!qm->err_status.is_qm_ecc_mbit &&
	    !qm->err_status.is_dev_ecc_mbit)
		return;

	/* open the OOO port for PEH to write out MSI */
	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
	value |= qm->err_info.msi_wr_port;
	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);

clear_flags:
	qm->err_status.is_qm_ecc_mbit = false;
	qm->err_status.is_dev_ecc_mbit = false;
}
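/*
 * Recovery after a successful soft reset: re-enable MSI and the MSE bits,
 * re-run the device hardware init, clear latched ECC error sources, restart
 * the QM, and then bring the VFs back up.
 */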
static int qm_controller_reset_done(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "Fails to enable PEH MSI bit!\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "Fails to enable pf MSE bit!\n");
		return ret;
	}

	if (qm->vfs_num) {
		ret = qm_set_vf_mse(qm, true);
		if (ret) {
			pci_err(pdev, "Fails to enable vf MSE bit!\n");
			return ret;
		}
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "Failed to init device\n");
		return ret;
	}

	qm_restart_prepare(qm);
	hisi_qm_dev_err_init(qm);
	if (qm->err_ini->open_axi_master_ooo)
		qm->err_ini->open_axi_master_ooo(qm);

	ret = qm_dev_mem_reset(qm);
	if (ret) {
		pci_err(pdev, "failed to reset device memory\n");
		return ret;
	}

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM!\n");
		return ret;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in soft reset.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in soft reset!\n");

	qm_restart_done(qm);

	qm_reset_bit_clear(qm);

	return 0;
}
static void qm_show_last_dfx_regs(struct hisi_qm *qm)
{
	struct qm_debug *debug = &qm->debug;
	struct pci_dev *pdev = qm->pdev;
	u32 val;
	int i;

	if (qm->fun_type == QM_HW_VF || !debug->qm_last_words)
		return;

	for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) {
		val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset);
		if (debug->qm_last_words[i] != val)
			pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n",
				 qm_dfx_regs[i].name, debug->qm_last_words[i], val);
	}
}
static int qm_controller_reset(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_info(pdev, "Controller resetting...\n");

	ret = qm_controller_reset_prepare(qm);
	if (ret) {
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return ret;
	}

	qm_show_last_dfx_regs(qm);
	if (qm->err_ini->show_last_dfx_regs)
		qm->err_ini->show_last_dfx_regs(qm);

	ret = qm_soft_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		qm_reset_bit_clear(qm);
		return ret;
	}

	ret = qm_controller_reset_done(qm);
	if (ret) {
		qm_reset_bit_clear(qm);
		return ret;
	}

	pci_info(pdev, "Controller reset complete\n");

	return 0;
}
/**
 * hisi_qm_dev_slot_reset() - slot reset
 * @pdev: the PCIe device
 *
 * This function offers the QM-related PCIe device reset interface. Drivers
 * which use the QM can use this function as slot_reset in their struct
 * pci_error_handlers.
 */
pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (pdev->is_virtfn)
		return PCI_ERS_RESULT_RECOVERED;

	pci_aer_clear_nonfatal_status(pdev);

	/* reset pcie device controller */
	ret = qm_controller_reset(qm);
	if (ret) {
		pci_err(pdev, "Controller reset failed (%d)\n", ret);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
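/*
 * Illustrative wiring (a sketch, not code from this file): an accelerator
 * driver built on the QM would typically hook the reset callbacks up like
 * this, where "xxx_err_handler" is a placeholder name:
 *
 *	static const struct pci_error_handlers xxx_err_handler = {
 *		.error_detected	= hisi_qm_dev_err_detected,
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};
 */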
void hisi_qm_reset_prepare(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	u32 delay = 0;
	int ret;

	hisi_qm_dev_err_uninit(pf_qm);

	/*
	 * Check whether there is an ECC mbit error. If there is, wait for
	 * the soft reset to fix it.
	 */
	while (qm_check_dev_error(pf_qm)) {
		msleep(++delay);
		if (delay > QM_RESET_WAIT_TIMEOUT)
			return;
	}

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		pci_err(pdev, "FLR not ready!\n");
		return;
	}

	/* PF obtains the information of VF by querying the register. */
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_uninit(qm);

	ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_FLR);
	if (ret)
		pci_err(pdev, "failed to stop vfs by pf in FLR.\n");

	ret = hisi_qm_stop(qm, QM_FLR);
	if (ret) {
		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
		hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
		return;
	}

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to stop by vfs in FLR!\n");

	pci_info(pdev, "FLR resetting...\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
static bool qm_flr_reset_complete(struct pci_dev *pdev)
{
	struct pci_dev *pf_pdev = pci_physfn(pdev);
	struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
	u32 id;

	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
	if (id == QM_PCI_COMMAND_INVALID) {
		pci_err(pdev, "Device can not be used!\n");
		return false;
	}

	return true;
}
void hisi_qm_reset_done(struct pci_dev *pdev)
{
	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_dev_hw_init(qm);
		if (ret) {
			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
			goto flr_done;
		}
	}

	hisi_qm_dev_err_init(pf_qm);

	ret = qm_restart(qm);
	if (ret) {
		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
		goto flr_done;
	}

	ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE);
	if (ret)
		pci_err(pdev, "failed to start vfs by pf in FLR.\n");

	ret = qm_wait_vf_prepare_finish(qm);
	if (ret)
		pci_err(pdev, "failed to start by vfs in FLR!\n");

flr_done:
	if (qm->fun_type == QM_HW_PF)
		qm_cmd_init(qm);

	if (qm_flr_reset_complete(pdev))
		pci_info(pdev, "FLR reset complete\n");

	qm_reset_bit_clear(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
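/*
 * Abnormal (error) interrupt handler: errors that need a controller reset
 * schedule rst_work exactly once; the QM_RST_SCHED bit prevents queuing a
 * duplicate reset while one is already pending.
 */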
static irqreturn_t qm_abnormal_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	enum acc_err_result ret;

	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
	ret = qm_process_dev_error(qm);
	if (ret == ACC_ERR_NEED_RESET &&
	    !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) &&
	    !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl))
		schedule_work(&qm->rst_work);

	return IRQ_HANDLED;
}
static int qm_irq_register(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
			  qm_irq, 0, qm->dev_name, qm);
	if (ret)
		return ret;

	if (qm->ver > QM_HW_V1) {
		ret = request_threaded_irq(pci_irq_vector(pdev,
					   QM_AEQ_EVENT_IRQ_VECTOR),
					   qm_aeq_irq, qm_aeq_thread,
					   0, qm->dev_name, qm);
		if (ret)
			goto err_aeq_irq;

		if (qm->fun_type == QM_HW_PF) {
			ret = request_irq(pci_irq_vector(pdev,
					  QM_ABNORMAL_EVENT_IRQ_VECTOR),
					  qm_abnormal_irq, 0, qm->dev_name, qm);
			if (ret)
				goto err_abnormal_irq;
		}
	}

	if (qm->ver > QM_HW_V2) {
		ret = request_irq(pci_irq_vector(pdev, QM_CMD_EVENT_IRQ_VECTOR),
				  qm_mb_cmd_irq, 0, qm->dev_name, qm);
		if (ret)
			goto err_mb_cmd_irq;
	}

	return 0;

err_mb_cmd_irq:
	if (qm->fun_type == QM_HW_PF)
		free_irq(pci_irq_vector(pdev, QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
err_abnormal_irq:
	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
err_aeq_irq:
	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
	return ret;
}
/**
 * hisi_qm_dev_shutdown() - Shutdown device.
 * @pdev: The device to be shut down.
 *
 * This function stops the qm when the OS shuts down or reboots.
 */
void hisi_qm_dev_shutdown(struct pci_dev *pdev)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret)
		dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n");
}
EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
static void hisi_qm_controller_reset(struct work_struct *rst_work)
{
	struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
	int ret;

	ret = qm_pm_get_sync(qm);
	if (ret) {
		clear_bit(QM_RST_SCHED, &qm->misc_ctl);
		return;
	}

	/* reset pcie device controller */
	ret = qm_controller_reset(qm);
	if (ret)
		dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);

	qm_pm_put_sync(qm);
}
static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
				   enum qm_stop_reason stop_reason)
{
	enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE;
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_reset_prepare_ready(qm);
	if (ret) {
		dev_err(&pdev->dev, "reset prepare not ready!\n");
		atomic_set(&qm->status.flags, QM_STOP);
		cmd = QM_VF_PREPARE_FAIL;
		goto err_prepare;
	}

	ret = hisi_qm_stop(qm, stop_reason);
	if (ret) {
		dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret);
		atomic_set(&qm->status.flags, QM_STOP);
		cmd = QM_VF_PREPARE_FAIL;
		goto err_prepare;
	} else {
		goto out;
	}

err_prepare:
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
	hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
out:
	pci_save_state(pdev);
	ret = qm->ops->ping_pf(qm, cmd);
	if (ret)
		dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n");
}
static void qm_pf_reset_vf_done(struct hisi_qm *qm)
{
	enum qm_mb_cmd cmd = QM_VF_START_DONE;
	struct pci_dev *pdev = qm->pdev;
	int ret;

	pci_restore_state(pdev);
	ret = hisi_qm_start(qm);
	if (ret) {
		dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret);
		cmd = QM_VF_START_FAIL;
	}

	ret = qm->ops->ping_pf(qm, cmd);
	if (ret)
		dev_warn(&pdev->dev, "PF responds timeout in reset done!\n");

	qm_reset_bit_clear(qm);
}
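/*
 * After a VF has acked the reset-prepare message, it polls its interrupt
 * source register until the PF signals completion, then fetches the mailbox
 * command and checks that it really is QM_PF_RESET_DONE.
 */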
static int qm_wait_pf_reset_finish(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	u32 val, cmd;
	u64 msg;
	int ret;

	/* Wait for reset to finish */
	ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val,
					 val == BIT(0), QM_VF_RESET_WAIT_US,
					 QM_VF_RESET_WAIT_TIMEOUT_US);
	/* hardware completion status should be available by this time */
	if (ret) {
		dev_err(dev, "couldn't get reset done status from PF, timeout!\n");
		return -ETIMEDOUT;
	}

	/*
	 * Whether or not the message is received successfully,
	 * the VF needs to ack the PF by clearing the interrupt.
	 */
	ret = qm_get_mb_cmd(qm, &msg, 0);
	qm_clear_cmd_interrupt(qm, 0);
	if (ret) {
		dev_err(dev, "failed to get msg from PF in reset done!\n");
		return ret;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	if (cmd != QM_PF_RESET_DONE) {
		dev_err(dev, "the cmd(%u) is not reset done!\n", cmd);
		ret = -EINVAL;
	}

	return ret;
}
static void qm_pf_reset_vf_process(struct hisi_qm *qm,
				   enum qm_stop_reason stop_reason)
{
	struct device *dev = &qm->pdev->dev;
	int ret;

	dev_info(dev, "device reset start...\n");

	/* The message is obtained by querying the register during resetting */
	qm_cmd_uninit(qm);
	qm_pf_reset_vf_prepare(qm, stop_reason);

	ret = qm_wait_pf_reset_finish(qm);
	if (ret)
		goto err_get_status;

	qm_pf_reset_vf_done(qm);
	qm_cmd_init(qm);

	dev_info(dev, "device reset done.\n");

	return;

err_get_status:
	qm_cmd_init(qm);
	qm_reset_bit_clear(qm);
}
static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num)
{
	struct device *dev = &qm->pdev->dev;
	u32 cmd;
	u64 msg;
	int ret;

	/*
	 * Get the msg from source by sending mailbox. Whether or not the
	 * message is received successfully, the destination needs to ack the
	 * source by clearing the interrupt.
	 */
	ret = qm_get_mb_cmd(qm, &msg, fun_num);
	qm_clear_cmd_interrupt(qm, BIT(fun_num));
	if (ret) {
		dev_err(dev, "failed to get msg from source!\n");
		return;
	}

	cmd = msg & QM_MB_CMD_DATA_MASK;
	switch (cmd) {
	case QM_PF_FLR_PREPARE:
		qm_pf_reset_vf_process(qm, QM_FLR);
		break;
	case QM_PF_SRST_PREPARE:
		qm_pf_reset_vf_process(qm, QM_SOFT_RESET);
		break;
	case QM_VF_GET_QOS:
		qm_vf_get_qos(qm, fun_num);
		break;
	case QM_PF_SET_QOS:
		qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT;
		break;
	default:
		dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num);
		break;
	}
}
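/*
 * Mailbox command bottom half: on the PF, each set bit i in the interrupt
 * source register corresponds to a pending message from VF i; a VF only
 * ever receives messages from the PF (function number 0).
 */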
static void qm_cmd_process(struct work_struct *cmd_process)
{
	struct hisi_qm *qm = container_of(cmd_process,
					  struct hisi_qm, cmd_process);
	u32 vfs_num = qm->vfs_num;
	u64 val;
	u32 i;

	if (qm->fun_type == QM_HW_PF) {
		val = readq(qm->io_base + QM_IFC_INT_SOURCE_P);
		if (!val)
			return;

		for (i = 1; i <= vfs_num; i++) {
			if (val & BIT(i))
				qm_handle_cmd_msg(qm, i);
		}

		return;
	}

	qm_handle_cmd_msg(qm, 0);
}
/**
 * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
 * @qm: The qm to add.
 * @qm_list: The qm list.
 *
 * This function adds the qm to the qm list, and registers the algorithms
 * to crypto if the qm list was empty before the add.
 */
int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	struct device *dev = &qm->pdev->dev;
	int flag = 0;
	int ret = 0;

	mutex_lock(&qm_list->lock);
	if (list_empty(&qm_list->list))
		flag = 1;
	list_add_tail(&qm->list, &qm_list->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva) {
		dev_info(dev, "HW V2 cannot use both uacce sva mode and hardware crypto algs.\n");
		return 0;
	}

	if (flag) {
		ret = qm_list->register_to_crypto(qm);
		if (ret) {
			mutex_lock(&qm_list->lock);
			list_del(&qm->list);
			mutex_unlock(&qm_list->lock);
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
/**
 * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
 *			      qm list.
 * @qm: The qm to delete.
 * @qm_list: The qm list.
 *
 * This function deletes the qm from the qm list, and unregisters the
 * algorithms from crypto when the qm list becomes empty.
 */
void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
{
	mutex_lock(&qm_list->lock);
	list_del(&qm->list);
	mutex_unlock(&qm_list->lock);

	if (qm->ver <= QM_HW_V2 && qm->use_sva)
		return;

	if (list_empty(&qm_list->list))
		qm_list->unregister_from_crypto(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
static int qm_get_qp_num(struct hisi_qm *qm)
{
	if (qm->ver == QM_HW_V1)
		qm->ctrl_qp_num = QM_QNUM_V1;
	else if (qm->ver == QM_HW_V2)
		qm->ctrl_qp_num = QM_QNUM_V2;
	else
		qm->ctrl_qp_num = readl(qm->io_base + QM_CAPBILITY) &
				  QM_QP_NUN_MASK;

	if (qm->use_db_isolation)
		qm->max_qp_num = (readl(qm->io_base + QM_CAPBILITY) >>
				  QM_QP_MAX_NUM_SHIFT) & QM_QP_NUN_MASK;
	else
		qm->max_qp_num = qm->ctrl_qp_num;

	/* check if qp number is valid */
	if (qm->qp_num > qm->max_qp_num) {
		dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n",
			qm->qp_num, qm->max_qp_num);
		return -EINVAL;
	}

	return 0;
}
static int qm_get_pci_res(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	ret = pci_request_mem_regions(pdev, qm->dev_name);
	if (ret < 0) {
		dev_err(dev, "Failed to request mem regions!\n");
		return ret;
	}

	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
	qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2));
	if (!qm->io_base) {
		ret = -EIO;
		goto err_request_mem_regions;
	}

	if (qm->ver > QM_HW_V2) {
		if (qm->fun_type == QM_HW_PF)
			qm->use_db_isolation = readl(qm->io_base +
						     QM_QUE_ISO_EN) & BIT(0);
		else
			qm->use_db_isolation = readl(qm->io_base +
						     QM_QUE_ISO_CFG_V) & BIT(0);
	}

	if (qm->use_db_isolation) {
		qm->db_interval = QM_QP_DB_INTERVAL;
		qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4);
		qm->db_io_base = ioremap(qm->db_phys_base,
					 pci_resource_len(pdev, PCI_BAR_4));
		if (!qm->db_io_base) {
			ret = -EIO;
			goto err_ioremap;
		}
	} else {
		qm->db_phys_base = qm->phys_base;
		qm->db_io_base = qm->io_base;
		qm->db_interval = 0;
	}

	if (qm->fun_type == QM_HW_PF) {
		ret = qm_get_qp_num(qm);
		if (ret)
			goto err_db_ioremap;
	}

	return 0;

err_db_ioremap:
	if (qm->use_db_isolation)
		iounmap(qm->db_io_base);
err_ioremap:
	iounmap(qm->io_base);
err_request_mem_regions:
	pci_release_mem_regions(pdev);
	return ret;
}
static int hisi_qm_pci_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	unsigned int num_vec;
	int ret;

	ret = pci_enable_device_mem(pdev);
	if (ret < 0) {
		dev_err(dev, "Failed to enable device mem!\n");
		return ret;
	}

	ret = qm_get_pci_res(qm);
	if (ret)
		goto err_disable_pcidev;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret < 0)
		goto err_get_pci_res;
	pci_set_master(pdev);

	if (!qm->ops->get_irq_num) {
		ret = -EOPNOTSUPP;
		goto err_get_pci_res;
	}
	num_vec = qm->ops->get_irq_num(qm);
	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
	if (ret < 0) {
		dev_err(dev, "Failed to enable MSI vectors!\n");
		goto err_get_pci_res;
	}

	return 0;

err_get_pci_res:
	qm_put_pci_res(qm);
err_disable_pcidev:
	pci_disable_device(pdev);
	return ret;
}
static int hisi_qm_init_work(struct hisi_qm *qm)
{
	int i;

	for (i = 0; i < qm->qp_num; i++)
		INIT_WORK(&qm->poll_data[i].work, qm_work_process);

	if (qm->fun_type == QM_HW_PF)
		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);

	if (qm->ver > QM_HW_V2)
		INIT_WORK(&qm->cmd_process, qm_cmd_process);

	qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM |
				 WQ_UNBOUND, num_online_cpus(),
				 pci_name(qm->pdev));
	if (!qm->wq) {
		pci_err(qm->pdev, "failed to alloc workqueue!\n");
		return -ENOMEM;
	}

	return 0;
}
static int hisi_qp_alloc_memory(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	size_t qp_dma_size;
	int i, ret;

	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
	if (!qm->qp_array)
		return -ENOMEM;

	qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL);
	if (!qm->poll_data) {
		kfree(qm->qp_array);
		return -ENOMEM;
	}

	/* one more page for device or qp statuses */
	qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
		      sizeof(struct qm_cqe) * QM_Q_DEPTH;
	qp_dma_size = PAGE_ALIGN(qp_dma_size) + PAGE_SIZE;
	for (i = 0; i < qm->qp_num; i++) {
		qm->poll_data[i].qm = qm;
		ret = hisi_qp_memory_init(qm, qp_dma_size, i);
		if (ret)
			goto err_init_qp_mem;

		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
	}

	return 0;

err_init_qp_mem:
	hisi_qp_memory_uninit(qm, i);

	return ret;
}
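/*
 * All QM-owned queue structures (EQ entries, AEQ entries, SQ and CQ
 * contexts) live in one coherent DMA allocation; QM_INIT_BUF below carves
 * that block into the per-type regions and records both the CPU and DMA
 * addresses of each.
 */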
static int hisi_qm_memory_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;
	int ret, total_func, i;
	size_t off = 0;

	total_func = pci_sriov_get_totalvfs(qm->pdev) + 1;
	qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL);
	if (!qm->factor)
		return -ENOMEM;
	for (i = 0; i < total_func; i++)
		qm->factor[i].func_qos = QM_QOS_MAX_VAL;

#define QM_INIT_BUF(qm, type, num) do { \
	(qm)->type = ((qm)->qdma.va + (off)); \
	(qm)->type##_dma = (qm)->qdma.dma + (off); \
	off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
} while (0)

	idr_init(&qm->qp_idr);
	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
			QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
					 GFP_ATOMIC);
	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
	if (!qm->qdma.va) {
		ret = -ENOMEM;
		goto err_alloc_qdma;
	}

	QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
	QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
	QM_INIT_BUF(qm, sqc, qm->qp_num);
	QM_INIT_BUF(qm, cqc, qm->qp_num);

	ret = hisi_qp_alloc_memory(qm);
	if (ret)
		goto err_alloc_qp_array;

	return 0;

err_alloc_qp_array:
	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
err_alloc_qdma:
	kfree(qm->factor);

	return ret;
}
static void qm_last_regs_init(struct hisi_qm *qm)
{
	int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs);
	struct qm_debug *debug = &qm->debug;
	int i;

	if (qm->fun_type == QM_HW_VF)
		return;

	debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int),
				       GFP_KERNEL);
	if (!debug->qm_last_words)
		return;

	for (i = 0; i < dfx_regs_num; i++) {
		debug->qm_last_words[i] = readl_relaxed(qm->io_base +
			qm_dfx_regs[i].offset);
	}
}
/**
 * hisi_qm_init() - Initialize the qm's configuration.
 * @qm: The qm to initialize.
 *
 * This function initializes the qm; hisi_qm_start() can then be called to
 * put the qm to work.
 */
int hisi_qm_init(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	hisi_qm_pre_init(qm);

	ret = hisi_qm_pci_init(qm);
	if (ret)
		return ret;

	ret = qm_irq_register(qm);
	if (ret)
		goto err_pci_init;

	if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
		/* v2 starts to support get vft by mailbox */
		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		if (ret)
			goto err_irq_register;
	}

	if (qm->fun_type == QM_HW_PF) {
		qm_disable_clock_gate(qm);
		ret = qm_dev_mem_reset(qm);
		if (ret) {
			dev_err(dev, "failed to reset device memory\n");
			goto err_irq_register;
		}
	}

	if (qm->mode == UACCE_MODE_SVA) {
		ret = qm_alloc_uacce(qm);
		if (ret < 0)
			dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
	}

	ret = hisi_qm_memory_init(qm);
	if (ret)
		goto err_alloc_uacce;

	ret = hisi_qm_init_work(qm);
	if (ret)
		goto err_free_qm_memory;

	qm_cmd_init(qm);
	atomic_set(&qm->status.flags, QM_INIT);

	qm_last_regs_init(qm);

	return 0;

err_free_qm_memory:
	hisi_qm_memory_uninit(qm);
err_alloc_uacce:
	if (qm->use_sva) {
		uacce_remove(qm->uacce);
		qm->uacce = NULL;
	}
err_irq_register:
	qm_irq_unregister(qm);
err_pci_init:
	hisi_qm_pci_uninit(qm);
	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_init);
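/*
 * Typical call order in an accelerator driver's probe path (an illustrative
 * sketch only; the device-specific steps in between are placeholders):
 *
 *	ret = hisi_qm_init(qm);
 *	if (ret)
 *		return ret;
 *	// ...set up device-specific error handling, debugfs, etc...
 *	ret = hisi_qm_start(qm);
 */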
/**
 * hisi_qm_get_dfx_access() - Try to get dfx access.
 * @qm: pointer to accelerator device.
 *
 * Try to get dfx access, then the user can read the debug registers.
 *
 * If the device is suspended, return failure; otherwise bump up the
 * runtime PM usage counter.
 */
int hisi_qm_get_dfx_access(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (pm_runtime_suspended(dev)) {
		dev_info(dev, "cannot read/write - device is suspended.\n");
		return -EAGAIN;
	}

	return qm_pm_get_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access);
/**
 * hisi_qm_put_dfx_access() - Put dfx access.
 * @qm: pointer to accelerator device.
 *
 * Put dfx access, drop the runtime PM usage counter.
 */
void hisi_qm_put_dfx_access(struct hisi_qm *qm)
{
	qm_pm_put_sync(qm);
}
EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access);
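/*
 * hisi_qm_get_dfx_access()/hisi_qm_put_dfx_access() are meant to be used as
 * a pair around debugfs reads and writes, e.g. (illustrative only):
 *
 *	ret = hisi_qm_get_dfx_access(qm);
 *	if (ret)
 *		return ret;
 *	// ...read or write the dfx registers...
 *	hisi_qm_put_dfx_access(qm);
 */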
/**
 * hisi_qm_pm_init() - Initialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Function that initializes qm runtime PM.
 */
void hisi_qm_pm_init(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
		return;

	pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_init);
/**
 * hisi_qm_pm_uninit() - Uninitialize qm runtime PM.
 * @qm: pointer to accelerator device.
 *
 * Function that uninitializes qm runtime PM.
 */
void hisi_qm_pm_uninit(struct hisi_qm *qm)
{
	struct device *dev = &qm->pdev->dev;

	if (qm->fun_type == QM_HW_VF || qm->ver < QM_HW_V3)
		return;

	pm_runtime_get_noresume(dev);
	pm_runtime_dont_use_autosuspend(dev);
}
EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit);
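/*
 * Runtime-suspend teardown mirrors the soft-reset quiesce path: disable MSI,
 * shut down the AXI master OOO port, wait for outstanding transactions to
 * return, then clear the PF MSE bit.
 */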
static int qm_prepare_for_suspend(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;
	u32 val;

	ret = qm->ops->set_msi(qm, false);
	if (ret) {
		pci_err(pdev, "failed to disable MSI before suspending!\n");
		return ret;
	}

	/* shutdown OOO register */
	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);

	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
					 val,
					 (val == ACC_MASTER_TRANS_RETURN_RW),
					 POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		pci_emerg(pdev, "Bus lock! Please reset system.\n");
		return ret;
	}

	ret = qm_set_pf_mse(qm, false);
	if (ret)
		pci_err(pdev, "failed to disable MSE before suspending!\n");

	return ret;
}
static int qm_rebuild_for_resume(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;
	int ret;

	ret = qm_set_pf_mse(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSE after resuming!\n");
		return ret;
	}

	ret = qm->ops->set_msi(qm, true);
	if (ret) {
		pci_err(pdev, "failed to enable MSI after resuming!\n");
		return ret;
	}

	ret = qm_dev_hw_init(qm);
	if (ret) {
		pci_err(pdev, "failed to init device after resuming\n");
		return ret;
	}

	qm_cmd_init(qm);
	hisi_qm_dev_err_init(qm);
	qm_disable_clock_gate(qm);
	ret = qm_dev_mem_reset(qm);
	if (ret)
		pci_err(pdev, "failed to reset device memory\n");

	return ret;
}
/**
 * hisi_qm_suspend() - Runtime suspend of given device.
 * @dev: device to suspend.
 *
 * Function that suspends the device.
 */
int hisi_qm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "entering suspended state\n");

	ret = hisi_qm_stop(qm, QM_NORMAL);
	if (ret) {
		pci_err(pdev, "failed to stop qm(%d)\n", ret);
		return ret;
	}

	ret = qm_prepare_for_suspend(qm);
	if (ret)
		pci_err(pdev, "failed to prepare for suspend(%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_suspend);
/**
 * hisi_qm_resume() - Runtime resume of given device.
 * @dev: device to resume.
 *
 * Function that resumes the device.
 */
int hisi_qm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct hisi_qm *qm = pci_get_drvdata(pdev);
	int ret;

	pci_info(pdev, "resuming from suspend state\n");

	ret = qm_rebuild_for_resume(qm);
	if (ret) {
		pci_err(pdev, "failed to rebuild for resume(%d)\n", ret);
		return ret;
	}

	ret = hisi_qm_start(qm);
	if (ret)
		pci_err(pdev, "failed to start qm(%d)\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(hisi_qm_resume);
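/*
 * Illustrative wiring (a sketch, not code from this file): drivers built on
 * the QM typically plug these callbacks into their dev_pm_ops, where
 * "xxx_pm_ops" is a placeholder name:
 *
 *	static const struct dev_pm_ops xxx_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};
 */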
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");