#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_common.h"
#include "nitrox_csr.h"
#include "nitrox_dev.h"
9 #define NR_RING_VECTORS 3
10 #define NPS_CORE_INT_ACTIVE_ENTRY 192
13 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
17 static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
19 struct bh_data *slc = data;
20 union nps_pkt_slc_cnts pkt_slc_cnts;
22 pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
23 /* New packet on SLC output port */
24 if (pkt_slc_cnts.s.slc_int)
25 tasklet_hi_schedule(&slc->resp_handler);
30 static void clear_nps_core_err_intr(struct nitrox_device *ndev)
34 /* Write 1 to clear */
35 value = nitrox_read_csr(ndev, NPS_CORE_INT);
36 nitrox_write_csr(ndev, NPS_CORE_INT, value);
38 dev_err_ratelimited(DEV(ndev), "NSP_CORE_INT 0x%016llx\n", value);
41 static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
43 union nps_pkt_int pkt_int;
44 unsigned long value, offset;
47 pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
48 dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT 0x%016llx\n",
51 if (pkt_int.s.slc_err) {
52 offset = NPS_PKT_SLC_ERR_TYPE;
53 value = nitrox_read_csr(ndev, offset);
54 nitrox_write_csr(ndev, offset, value);
55 dev_err_ratelimited(DEV(ndev),
56 "NPS_PKT_SLC_ERR_TYPE 0x%016lx\n", value);
58 offset = NPS_PKT_SLC_RERR_LO;
59 value = nitrox_read_csr(ndev, offset);
60 nitrox_write_csr(ndev, offset, value);
61 /* enable the solicit ports */
62 for_each_set_bit(i, &value, BITS_PER_LONG)
63 enable_pkt_solicit_port(ndev, i);
65 dev_err_ratelimited(DEV(ndev),
66 "NPS_PKT_SLC_RERR_LO 0x%016lx\n", value);
68 offset = NPS_PKT_SLC_RERR_HI;
69 value = nitrox_read_csr(ndev, offset);
70 nitrox_write_csr(ndev, offset, value);
71 dev_err_ratelimited(DEV(ndev),
72 "NPS_PKT_SLC_RERR_HI 0x%016lx\n", value);
75 if (pkt_int.s.in_err) {
76 offset = NPS_PKT_IN_ERR_TYPE;
77 value = nitrox_read_csr(ndev, offset);
78 nitrox_write_csr(ndev, offset, value);
79 dev_err_ratelimited(DEV(ndev),
80 "NPS_PKT_IN_ERR_TYPE 0x%016lx\n", value);
81 offset = NPS_PKT_IN_RERR_LO;
82 value = nitrox_read_csr(ndev, offset);
83 nitrox_write_csr(ndev, offset, value);
84 /* enable the input ring */
85 for_each_set_bit(i, &value, BITS_PER_LONG)
86 enable_pkt_input_ring(ndev, i);
88 dev_err_ratelimited(DEV(ndev),
89 "NPS_PKT_IN_RERR_LO 0x%016lx\n", value);
91 offset = NPS_PKT_IN_RERR_HI;
92 value = nitrox_read_csr(ndev, offset);
93 nitrox_write_csr(ndev, offset, value);
94 dev_err_ratelimited(DEV(ndev),
95 "NPS_PKT_IN_RERR_HI 0x%016lx\n", value);
99 static void clear_pom_err_intr(struct nitrox_device *ndev)
103 value = nitrox_read_csr(ndev, POM_INT);
104 nitrox_write_csr(ndev, POM_INT, value);
105 dev_err_ratelimited(DEV(ndev), "POM_INT 0x%016llx\n", value);
108 static void clear_pem_err_intr(struct nitrox_device *ndev)
112 value = nitrox_read_csr(ndev, PEM0_INT);
113 nitrox_write_csr(ndev, PEM0_INT, value);
114 dev_err_ratelimited(DEV(ndev), "PEM(0)_INT 0x%016llx\n", value);
117 static void clear_lbc_err_intr(struct nitrox_device *ndev)
119 union lbc_int lbc_int;
123 lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
124 dev_err_ratelimited(DEV(ndev), "LBC_INT 0x%016llx\n", lbc_int.value);
126 if (lbc_int.s.dma_rd_err) {
127 for (i = 0; i < NR_CLUSTERS; i++) {
128 offset = EFL_CORE_VF_ERR_INT0X(i);
129 value = nitrox_read_csr(ndev, offset);
130 nitrox_write_csr(ndev, offset, value);
131 offset = EFL_CORE_VF_ERR_INT1X(i);
132 value = nitrox_read_csr(ndev, offset);
133 nitrox_write_csr(ndev, offset, value);
137 if (lbc_int.s.cam_soft_err) {
138 dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
139 invalidate_lbc(ndev);
142 if (lbc_int.s.pref_dat_len_mismatch_err) {
143 offset = LBC_PLM_VF1_64_INT;
144 value = nitrox_read_csr(ndev, offset);
145 nitrox_write_csr(ndev, offset, value);
146 offset = LBC_PLM_VF65_128_INT;
147 value = nitrox_read_csr(ndev, offset);
148 nitrox_write_csr(ndev, offset, value);
151 if (lbc_int.s.rd_dat_len_mismatch_err) {
152 offset = LBC_ELM_VF1_64_INT;
153 value = nitrox_read_csr(ndev, offset);
154 nitrox_write_csr(ndev, offset, value);
155 offset = LBC_ELM_VF65_128_INT;
156 value = nitrox_read_csr(ndev, offset);
157 nitrox_write_csr(ndev, offset, value);
159 nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
162 static void clear_efl_err_intr(struct nitrox_device *ndev)
166 for (i = 0; i < NR_CLUSTERS; i++) {
167 union efl_core_int core_int;
170 offset = EFL_CORE_INTX(i);
171 core_int.value = nitrox_read_csr(ndev, offset);
172 nitrox_write_csr(ndev, offset, core_int.value);
173 dev_err_ratelimited(DEV(ndev), "ELF_CORE(%d)_INT 0x%016llx\n",
175 if (core_int.s.se_err) {
176 offset = EFL_CORE_SE_ERR_INTX(i);
177 value = nitrox_read_csr(ndev, offset);
178 nitrox_write_csr(ndev, offset, value);
183 static void clear_bmi_err_intr(struct nitrox_device *ndev)
187 value = nitrox_read_csr(ndev, BMI_INT);
188 nitrox_write_csr(ndev, BMI_INT, value);
189 dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
193 * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
194 * @ndev: NITROX device
196 static void clear_nps_core_int_active(struct nitrox_device *ndev)
198 union nps_core_int_active core_int_active;
200 core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
202 if (core_int_active.s.nps_core)
203 clear_nps_core_err_intr(ndev);
205 if (core_int_active.s.nps_pkt)
206 clear_nps_pkt_err_intr(ndev);
208 if (core_int_active.s.pom)
209 clear_pom_err_intr(ndev);
211 if (core_int_active.s.pem)
212 clear_pem_err_intr(ndev);
214 if (core_int_active.s.lbc)
215 clear_lbc_err_intr(ndev);
217 if (core_int_active.s.efl)
218 clear_efl_err_intr(ndev);
220 if (core_int_active.s.bmi)
221 clear_bmi_err_intr(ndev);
223 /* If more work callback the ISR, set resend */
224 core_int_active.s.resend = 1;
225 nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
228 static irqreturn_t nps_core_int_isr(int irq, void *data)
230 struct nitrox_device *ndev = data;
232 clear_nps_core_int_active(ndev);
237 static int nitrox_enable_msix(struct nitrox_device *ndev)
239 struct msix_entry *entries;
241 int i, nr_entries, ret;
246 * Entry 0: NPS PKT ring 0
247 * Entry 1: AQMQ ring 0
248 * Entry 2: ZQM ring 0
249 * Entry 3: NPS PKT ring 1
250 * Entry 4: AQMQ ring 1
251 * Entry 5: ZQM ring 1
253 * Entry 192: NPS_CORE_INT_ACTIVE
255 nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
256 entries = kzalloc_node(nr_entries * sizeof(struct msix_entry),
257 GFP_KERNEL, ndev->node);
261 names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
268 for (i = 0; i < (nr_entries - 1); i++)
269 entries[i].entry = i;
271 entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;
273 for (i = 0; i < nr_entries; i++) {
274 *(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
275 if (!(*(names + i))) {
280 ndev->msix.entries = entries;
281 ndev->msix.names = names;
282 ndev->msix.nr_entries = nr_entries;
284 ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
285 ndev->msix.nr_entries);
287 dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
294 for (i = 0; i < nr_entries; i++)
302 static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
309 for (i = 0; i < ndev->nr_queues; i++) {
310 struct bh_data *bh = &ndev->bh.slc[i];
312 tasklet_disable(&bh->resp_handler);
313 tasklet_kill(&bh->resp_handler);
319 static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
324 size = ndev->nr_queues * sizeof(struct bh_data);
325 ndev->bh.slc = kzalloc(size, GFP_KERNEL);
329 for (i = 0; i < ndev->nr_queues; i++) {
330 struct bh_data *bh = &ndev->bh.slc[i];
333 offset = NPS_PKT_SLC_CNTSX(i);
334 /* pre calculate completion count address */
335 bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
336 bh->cmdq = &ndev->pkt_cmdqs[i];
338 tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
345 static int nitrox_request_irqs(struct nitrox_device *ndev)
347 struct pci_dev *pdev = ndev->pdev;
348 struct msix_entry *msix_ent = ndev->msix.entries;
349 int nr_ring_vectors, i = 0, ring, cpu, ret;
355 * Entry 0: NPS PKT ring 0
356 * Entry 1: AQMQ ring 0
357 * Entry 2: ZQM ring 0
358 * Entry 3: NPS PKT ring 1
360 * Entry 192: NPS_CORE_INT_ACTIVE
362 nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;
364 /* request irq for pkt ring/ports only */
365 while (i < nr_ring_vectors) {
366 name = *(ndev->msix.names + i);
367 ring = (i / NR_RING_VECTORS);
368 snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
371 ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
372 name, &ndev->bh.slc[ring]);
374 dev_err(&pdev->dev, "failed to get irq %d for %s\n",
375 msix_ent[i].vector, name);
378 cpu = ring % num_online_cpus();
379 irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));
381 set_bit(i, ndev->msix.irqs);
382 i += NR_RING_VECTORS;
385 /* Request IRQ for NPS_CORE_INT_ACTIVE */
386 name = *(ndev->msix.names + i);
387 snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
388 ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
390 dev_err(&pdev->dev, "failed to get irq %d for %s\n",
391 msix_ent[i].vector, name);
394 set_bit(i, ndev->msix.irqs);
399 static void nitrox_disable_msix(struct nitrox_device *ndev)
401 struct msix_entry *msix_ent = ndev->msix.entries;
402 char **names = ndev->msix.names;
403 int i = 0, ring, nr_ring_vectors;
405 nr_ring_vectors = ndev->msix.nr_entries - 1;
407 /* clear pkt ring irqs */
408 while (i < nr_ring_vectors) {
409 if (test_and_clear_bit(i, ndev->msix.irqs)) {
410 ring = (i / NR_RING_VECTORS);
411 irq_set_affinity_hint(msix_ent[i].vector, NULL);
412 free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
414 i += NR_RING_VECTORS;
416 irq_set_affinity_hint(msix_ent[i].vector, NULL);
417 free_irq(msix_ent[i].vector, ndev);
418 clear_bit(i, ndev->msix.irqs);
420 kfree(ndev->msix.entries);
421 for (i = 0; i < ndev->msix.nr_entries; i++)
425 pci_disable_msix(ndev->pdev);
429 * nitrox_pf_cleanup_isr: Cleanup PF MSI-X and IRQ
430 * @ndev: NITROX device
432 void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
434 nitrox_disable_msix(ndev);
435 nitrox_cleanup_pkt_slc_bh(ndev);
439 * nitrox_init_isr - Initialize PF MSI-X vectors and IRQ
440 * @ndev: NITROX device
442 * Return: 0 on success, a negative value on failure.
444 int nitrox_pf_init_isr(struct nitrox_device *ndev)
448 err = nitrox_setup_pkt_slc_bh(ndev);
452 err = nitrox_enable_msix(ndev);
456 err = nitrox_request_irqs(ndev);
463 nitrox_disable_msix(ndev);
465 nitrox_cleanup_pkt_slc_bh(ndev);