/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */
struct flag_table {
	u64 flag;	/* the flag */
	char *str;	/* description string */
	u16 extra;	/* extra information */
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
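/*
 * Illustrative expansion (editor's note): given the field order above,
 * FLAG_ENTRY("PioWriteBadCtxt", SEC_WRITE_DROPPED, mask) initializes a
 * struct flag_table as { mask, "PioWriteBadCtxt", SEC_WRITE_DROPPED },
 * while FLAG_ENTRY0() is the common case with no extra information.
 */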
/* Send Error Consequences */
#define SEC_WRITE_DROPPED	0x1
#define SEC_PACKET_DROPPED	0x2
#define SEC_SC_HALTED		0x4	/* per-context only */
#define SEC_SPC_FREEZE		0x8	/* per-HFI only */
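/*
 * Editor's note: these consequence bits are OR'd together into
 * flag_table.extra in the send-side error tables below, e.g.
 * SEC_WRITE_DROPPED | SEC_SPC_FREEZE for an error that both drops the
 * offending write and freezes the SPC.
 */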
#define DEFAULT_KRCVQS		2
#define MIN_KERNEL_KCTXTS	2
#define FIRST_KERNEL_KCTXT	1
/*
 * RSM instance allocation
 *   0 - Verbs
 *   1 - User Fecn Handling
 *   2 - Vnic
 */
#define RSM_INS_VERBS 0
#define RSM_INS_FECN 1
#define RSM_INS_VNIC 2
/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT 39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
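/*
 * Worked example (editor's note): an irev value of 0x0203 yields
 * emulator_rev(dd) == 0x02, and is_emulator_p(dd) is true because the
 * low nibble is 3 (parallel); a low nibble of 4 would mean serial.
 */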
/* RSM fields for Verbs */
#define IB_PACKET_TYPE 2ull
#define QW_SHIFT 6ull
#define QPN_WIDTH 7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW 0ull
#define LRH_BTH_BIT_OFFSET 48ull
#define LRH_BTH_OFFSET(off) ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK 3ull
#define LRH_BTH_VALUE 2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW 0ull
#define LRH_SC_BIT_OFFSET 56ull
#define LRH_SC_OFFSET(off) ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK 128ull
#define LRH_SC_VALUE 0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET ((1ull << QW_SHIFT) | (1ull))
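/*
 * Worked example (editor's note): with QW_SHIFT == 6, an RSM match or
 * select offset packs the quad-word index above the bit offset within
 * that quad word, so LRH_BTH_MATCH_OFFSET == (0 << 6) | 48 == 48 and
 * QPN_SELECT_OFFSET == (1 << 6) | 1 == 65.
 */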
/* RSM fields for Vnic */
/* L2_TYPE: QW 0, OFFSET 61 - for match */
#define L2_TYPE_QW 0ull
#define L2_TYPE_BIT_OFFSET 61ull
#define L2_TYPE_OFFSET(off) ((L2_TYPE_QW << QW_SHIFT) | (off))
#define L2_TYPE_MATCH_OFFSET L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
#define L2_TYPE_MASK 3ull
#define L2_16B_VALUE 2ull

/* L4_TYPE QW 1, OFFSET 0 - for match */
#define L4_TYPE_QW 1ull
#define L4_TYPE_BIT_OFFSET 0ull
#define L4_TYPE_OFFSET(off) ((L4_TYPE_QW << QW_SHIFT) | (off))
#define L4_TYPE_MATCH_OFFSET L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
#define L4_16B_TYPE_MASK 0xFFull
#define L4_16B_ETH_VALUE 0x78ull

/* 16B VESWID - for select */
#define L4_16B_HDR_VESWID_OFFSET ((2 << QW_SHIFT) | (16ull))
/* 16B ENTROPY - for select */
#define L2_16B_ENTROPY_OFFSET ((1 << QW_SHIFT) | (32ull))
/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
	num, \
	sc0, sc0val, \
	sc1, sc1val, \
	sc2, sc2val, \
	sc3, sc3val, \
	sc4, sc4val, \
	sc5, sc5val, \
	sc6, sc6val, \
	sc7, sc7val) \
(	((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
	((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
	((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
	((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
	((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
	((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
	((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
	((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)
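/*
 * Usage sketch (editor's illustration; the register name and argument
 * values here are hypothetical, not taken from this file):
 *
 *	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(0,
 *					      0, 0, 1, 0,
 *					      2, 0, 3, 0,
 *					      4, 0, 5, 0,
 *					      6, 0, 7, 0));
 *
 * would build the table-0 register value mapping SC0-SC7 to VL0, one
 * (sc, vl) pair per argument pair.
 */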
#define DC_SC_VL_VAL( \
	range, \
	e0, e0val, \
	e1, e1val, \
	e2, e2val, \
	e3, e3val, \
	e4, e4val, \
	e5, e5val, \
	e6, e6val, \
	e7, e7val, \
	e8, e8val, \
	e9, e9val, \
	e10, e10val, \
	e11, e11val, \
	e12, e12val, \
	e13, e13val, \
	e14, e14val, \
	e15, e15val) \
(	((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
	((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
	((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
	((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
	((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
	((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
	((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
	((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
	((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
	((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
	((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
	((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
	((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
	((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
	((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
	((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT)   \
)
/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
		   | CCE_STATUS_RXE_FROZE_SMASK \
		   | CCE_STATUS_TXE_FROZE_SMASK \
		   | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
		       | CCE_STATUS_TXE_PAUSED_SMASK \
		       | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK

#define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
#define CNTR_32BIT_MAX 0x00000000FFFFFFFF
/*
 * CCE Error flags
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CceCsrParityErr",
		CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("CceCsrReadBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("CceCsrWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/	FLAG_ENTRY0("CceTrgtAccessErr",
		CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/	FLAG_ENTRY0("CceRspdDataParityErr",
		CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY0("CceCsrCfgBusParityErr",
		CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/	FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
		CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/	FLAG_ENTRY0("PcicRetryMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY0("PcicRetrySotMemCorErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY0("PcicPostHdQCorErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/	FLAG_ENTRY0("PcicPostDatQCorErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/	FLAG_ENTRY0("PcicCplHdQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/	FLAG_ENTRY0("PcicCplDatQCorErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/	FLAG_ENTRY0("PcicNPostHQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/	FLAG_ENTRY0("PcicNPostDatQParityErr",
		CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/	FLAG_ENTRY0("PcicRetryMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/	FLAG_ENTRY0("PcicRetrySotMemUncErr",
		CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/	FLAG_ENTRY0("PcicPostHdQUncErr",
		CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/	FLAG_ENTRY0("PcicPostDatQUncErr",
		CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/	FLAG_ENTRY0("PcicCplHdQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/	FLAG_ENTRY0("PcicCplDatQUncErr",
		CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/	FLAG_ENTRY0("PcicTransmitFrontParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY0("PcicTransmitBackParityErr",
		CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY0("PcicReceiveParityErr",
		CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/	FLAG_ENTRY0("CceTrgtCplTimeoutErr",
		CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/	FLAG_ENTRY0("LATriggered",
		CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/	FLAG_ENTRY0("CceSegReadBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/	FLAG_ENTRY0("CceSegWriteBadAddrErr",
		CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/	FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
		CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
		CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/	FLAG_ENTRY0("CceMsixTableCorErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/	FLAG_ENTRY0("CceMsixTableUncErr",
		CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/	FLAG_ENTRY0("CceIntMapCorErr",
		CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/	FLAG_ENTRY0("CceIntMapUncErr",
		CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/	FLAG_ENTRY0("CceMsixCsrParityErr",
		CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK)
};
/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/	FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/	FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/	FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/	FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/	FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/	FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/	FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/	FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/	FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/	FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};
/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("PioWriteBadCtxt",
	SEC_WRITE_DROPPED,
	SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/	FLAG_ENTRY("PioWriteAddrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY("PioCsrParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("PioSbMemFifo0",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/	FLAG_ENTRY("PioSbMemFifo1",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/	FLAG_ENTRY("PioPccFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/	FLAG_ENTRY("PioPecFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/	FLAG_ENTRY("PioSbrdctlCrrelParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/	FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/	FLAG_ENTRY("PioPktEvictFifoParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/	FLAG_ENTRY("PioSmPktResetParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/	FLAG_ENTRY("PioVlLenMemBank0Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/	FLAG_ENTRY("PioVlLenMemBank1Unc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/	FLAG_ENTRY("PioVlLenMemBank0Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/	FLAG_ENTRY("PioVlLenMemBank1Cor",
	0,
	SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/	FLAG_ENTRY("PioCreditRetFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/	FLAG_ENTRY("PioPpmcPblFifo",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/	FLAG_ENTRY("PioInitSmIn",
	0,
	SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/	FLAG_ENTRY("PioPktEvictSmOrArbSm",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/	FLAG_ENTRY("PioHostAddrMemUnc",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/	FLAG_ENTRY("PioHostAddrMemCor",
	0,
	SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/	FLAG_ENTRY("PioWriteDataParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/	FLAG_ENTRY("PioStateMachine",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/	FLAG_ENTRY("PioWriteQwValidParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/	FLAG_ENTRY("PioBlockQwCountParity",
	SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/	FLAG_ENTRY("PioVlfVlLenParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/	FLAG_ENTRY("PioVlfSopParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/	FLAG_ENTRY("PioVlFifoParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/	FLAG_ENTRY("PioPpmcBqcMemParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/	FLAG_ENTRY("PioPpmcSopLen",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/	FLAG_ENTRY("PioCurrentFreeCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/	FLAG_ENTRY("PioLastReturnedCntParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/	FLAG_ENTRY("PioPccSopHeadParity",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/	FLAG_ENTRY("PioPecSopHeadParityErr",
	SEC_SPC_FREEZE,
	SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
};
/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
	(SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
	| SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SDmaRpyTagErr",
		SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/	FLAG_ENTRY0("SDmaCsrParityErr",
		SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/	FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/	FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
		SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK)
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR \
	(SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
	| SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
	(SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
	| SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/	FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/	FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
		SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/	FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/	FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/	FLAG_ENTRY0("TxPioLaunchIntfParityErr",
		SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/	FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
		SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/	FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
		SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/	FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/	FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/	FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/	FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/	FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
		SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/	FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
		SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/	FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
		SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/	FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
		SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/	FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
		SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/	FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
		SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/	FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
		SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/	FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
		SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/	FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
		SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/	FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
		SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/	FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
		SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/	FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
		SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/	FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
		SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/	FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
		SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/	FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
		SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/	FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
		SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/	FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
		SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/	FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
		SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/	FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
		SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/	FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
		SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/	FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
		SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/	FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
		SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/	FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
		SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/	FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
		SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/	FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
		SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/	FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/	FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/	FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/	FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/	FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/	FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/	FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/	FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/	FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/	FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/	FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/	FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/	FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/	FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/	FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/	FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/	FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/	FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/	FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/	FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/	FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/	FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
		SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/	FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
		SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};
/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/	FLAG_ENTRY0("Reserved", 0ull),
/* 1*/	FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/	FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/	FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/	FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/	FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/	FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/	FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/	FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/	FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/	FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/	FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/	FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/	FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/	FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/	FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/	FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/	FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/	FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/	FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/	FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};
/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
	(SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
	| SEES(TX_PIO_LAUNCH_INTF_PARITY) \
	| SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
	| SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
	| SEES(TX_LAUNCH_CSR_PARITY) \
	| SEES(TX_SBRD_CTL_CSR_PARITY) \
	| SEES(TX_CONFIG_PARITY) \
	| SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
	| SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
	| SEES(TX_CREDIT_RETURN_PARITY))
/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/	FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/	FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};
/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/	FLAG_ENTRY("InconsistentSop",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/	FLAG_ENTRY("DisallowedPacket",
		SEC_PACKET_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/	FLAG_ENTRY("WriteCrossesBoundary",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/	FLAG_ENTRY("WriteOverflow",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/	FLAG_ENTRY("WriteOutOfBounds",
		SEC_WRITE_DROPPED | SEC_SC_HALTED,
		SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};
/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/	FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/	FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/	FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/	FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/	FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/	FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/	FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/	FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/	FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/	FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/	FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/	FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/	FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/	FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/	FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/	FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/	FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
		RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/	FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/	FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/	FLAG_ENTRY0("RxRbufBlockListReadUncErr",
		RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/	FLAG_ENTRY0("RxRbufBlockListReadCorErr",
		RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/	FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
		RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/	FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
		RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/	FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
		RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/	FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
		RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/	FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/	FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/	FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
		RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/	FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/	FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/	FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/	FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/	FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/	FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/	FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/	FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
		RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/	FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
		RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/	FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/	FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/	FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/	FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
		RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/	FLAG_ENTRY0("RxLookupDesPart2ParityErr",
		RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/	FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/	FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/	FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/	FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/	FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/	FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/	FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/	FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/	FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/	FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/	FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/	FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/	FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/	FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/	FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/	FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/	FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/	FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/	FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/	FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/	FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/	FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};
/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR \
	(RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
	| RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
	| RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
	(RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
	RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
	FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
	FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
	FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
	FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
	FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
	FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
	FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
	FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
	FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
	FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
	FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
	FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
	FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
	FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
	FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
	FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
	FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
	FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
	FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
	FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
	FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
	FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
	FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
	FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
	FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
	FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
	FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
	FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
	FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
	FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
	FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
	FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
	FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
	FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
	FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
	FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
	FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
	FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
	FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
	FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
	FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
	FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};
/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/	FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/	FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/	FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/	FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
		LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/	FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/	FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/	FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/	FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/	FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/	FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/	FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/	FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/	FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/	FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
		LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/	FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/	FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/	FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/	FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/	FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/	FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
		LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/	FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/	FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/	FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/	FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/	FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/	FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/	FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
		LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/	FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/	FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
		LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/	FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
		LCBE(REDUNDANT_FLIT_PARITY_ERR))
};
/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
	FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
	FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
	FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
	FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
	FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
	FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
	FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
	FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
	FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
		    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
	FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};
/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
	FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
	FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
	FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
	FLAG_ENTRY0("Serdes internal loopback failure",
		    FAILED_SERDES_INTERNAL_LOOPBACK),
	FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
	FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
	FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
	FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
	FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
	FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
	FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
	FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
	FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
	FLAG_ENTRY0("External Device Request Timeout",
		    EXTERNAL_DEVICE_REQ_TIMEOUT),
};
/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
	FLAG_ENTRY0("Host request done", 0x0001),
	FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
	FLAG_ENTRY0("BC SMA message", 0x0004),
	FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
	FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
	FLAG_ENTRY0("External device config request", 0x0020),
	FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
	FLAG_ENTRY0("LinkUp achieved", 0x0080),
	FLAG_ENTRY0("Link going down", 0x0100),
	FLAG_ENTRY0("Link width downgraded", 0x0200),
};
static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
			       u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
				  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
				      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
				    u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
				  u8 *device_rev);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
			    u8 *tx_polarity_inversion,
			    u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
				unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
			   unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *ppd);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
					  u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
			   u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
					    int msecs);
static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				  int msecs);
static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
				   int msecs);
static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
					 int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *dd);
static void dc_shutdown(struct hfi1_devdata *dd);
static void dc_start(struct hfi1_devdata *dd);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
			   unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt registers.
 * Second tier interrupt registers have a single bit representing them
 * in the top-level CceIntStatus.
 */
struct err_reg_info {
	u32 status;	/* status CSR offset */
	u32 clear;	/* clear CSR offset */
	u32 mask;	/* mask CSR offset */
	void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
	const char *desc;
};
#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
	{ reg##_STATUS, reg##_CLEAR, reg##_MASK, \
	  handler, desc }
#define DC_EE1(reg, handler, desc) \
	{ reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
	{ reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
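/*
 * Illustrative expansion (editor's note): with the err_reg_info layout
 * above, EE(CCE_ERR, handle_cce_err, "CceErr") produces
 *
 *	{ CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *	  handle_cce_err, "CceErr" }
 *
 * while DC_EE1 and DC_EE2 cover the DC block's _FLG/_FLG_CLR/_FLG_EN
 * and _FLG/_CLR/_EN register naming instead.
 */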
/*
 * Table of the "misc" grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
/* 0*/	EE(CCE_ERR,	   handle_cce_err,    "CceErr"),
/* 1*/	EE(RCV_ERR,	   handle_rxe_err,    "RxeErr"),
/* 2*/	EE(MISC_ERR,	   handle_misc_err,   "MiscErr"),
/* 3*/	{ 0, 0, 0, NULL }, /* reserved */
/* 4*/	EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
/* 5*/	EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
/* 6*/	EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
/* 7*/	EE(SEND_ERR,	    handle_txe_err,    "TxeErr")
	/* the rest are reserved */
};
/*
 * Index into the Various section of the interrupt sources
 * corresponding to the Critical Temperature interrupt.
 */
#define TCRIT_INT_SOURCE 4

/*
 * SDMA error interrupt entry - refers to another register containing more
 * information.
 */
static const struct err_reg_info sdma_eng_err =
	EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");

static const struct err_reg_info various_err[NUM_VARIOUS] = {
/* 0*/	{ 0, 0, 0, NULL }, /* PbcInt */
/* 1*/	{ 0, 0, 0, NULL }, /* GpioAssertInt */
/* 2*/	EE(ASIC_QSFP1, handle_qsfp_int, "QSFP1"),
/* 3*/	EE(ASIC_QSFP2, handle_qsfp_int, "QSFP2"),
/* 4*/	{ 0, 0, 0, NULL }, /* TCritInt */
	/* rest are reserved */
};
/*
 * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
 * register can not be derived from the MTU value because 10K is not
 * a power of 2. Therefore, we need a constant. Everything else can
 * be calculated.
 */
#define DCC_CFG_PORT_MTU_CAP_10240 7
/*
 * Table of the DC grouping of error interrupts.  Each entry refers to
 * another register containing more information.
 */
static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
/* 0*/	DC_EE1(DCC_ERR,	      handle_dcc_err,	     "DCC Err"),
/* 1*/	DC_EE2(DC_LCB_ERR,    handle_lcb_err,	     "LCB Err"),
/* 2*/	DC_EE2(DC_DC8051_ERR, handle_8051_interrupt, "DC8051 Interrupt"),
/* 3*/	/* dc_lbm_int - special, see is_dc_int() */
	/* the rest are reserved */
};
struct cntr_entry {
	/* counter name */
	char *name;
	/* csr to read for name (if applicable) */
	u64 csr;
	/* offset into dd or ppd to store the counter's value */
	size_t offset;
	/* flags */
	u8 flags;
	/* accessor for stat element, context either dd or ppd */
	u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
		       int mode, u64 data);
};

#define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
#define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
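/*
 * Editor's note: C_RCV_HDR_OVF_0 through C_RCV_HDR_OVF_159 bracket one
 * header-overflow counter per receive context; the OVR_ELM() helper
 * below generates the matching cntr_entry for each context.
 */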
#define CNTR_ELEM(name, csr, offset, flags, accessor) \
{ \
	name, \
	csr, \
	offset, \
	flags, \
	accessor \
}

#define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + RCV_COUNTER_ARRAY64), \
	  0, flags, \
	  dev_access_u64_csr)

#define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
#define OVR_ELM(ctx) \
CNTR_ELEM("RcvHdrOvr" #ctx, \
	  (RCV_HDR_OVFL_CNT + ctx * 0x100), \
	  0, CNTR_NORMAL, port_access_u64_csr)
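/*
 * Illustrative expansion (editor's note): OVR_ELM(0) becomes
 *
 *	CNTR_ELEM("RcvHdrOvr0", RCV_HDR_OVFL_CNT + 0 * 0x100,
 *		  0, CNTR_NORMAL, port_access_u64_csr)
 *
 * i.e. one per-context counter entry, with each context's CSR spaced
 * 0x100 bytes apart.
 */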
#define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  port_access_u32_csr)

#define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + SEND_COUNTER_ARRAY64), \
	  0, flags, \
	  port_access_u64_csr)

#define TX64_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter * 8 + SEND_COUNTER_ARRAY64, \
	  0, flags, \
	  dev_access_u64_csr)

#define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
CNTR_ELEM(#name, \
	  (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
	  0, flags | CNTR_32BIT, \
	  dev_access_u32_csr)

#define DC_PERF_CNTR(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dev_access_u64_csr)

#define DC_PERF_CNTR_LCB(name, counter, flags) \
CNTR_ELEM(#name, \
	  counter, \
	  0, \
	  flags, \
	  dc_access_lcb_cntr)

#define SW_IBP_CNTR(name, cntr) \
CNTR_ELEM(#name, \
	  0, \
	  0, \
	  CNTR_SYNTH, \
	  access_ibp_##cntr)
/**
 * hfi1_addr_from_offset - return addr for readq/writeq
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * This routine selects the appropriate base address
 * based on the indicated offset.
 */
static inline void __iomem *hfi1_addr_from_offset(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (offset >= dd->base2_start)
		return dd->kregbase2 + (offset - dd->base2_start);
	return dd->kregbase1 + offset;
}
/**
 * read_csr - read CSR at the indicated offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * Return: the value read or all FF's if there
 * is no mapping
 */
u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return readq(hfi1_addr_from_offset(dd, offset));
	return -1;
}
/**
 * write_csr - write CSR at the indicated offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 * @value - value to write
 */
void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
{
	if (dd->flags & HFI1_PRESENT) {
		void __iomem *base = hfi1_addr_from_offset(dd, offset);

		/* avoid write to RcvArray */
		if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
			return;
		writeq(value, base);
	}
}
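/*
 * Editor's note: the WARN_ON() above rejects offsets in the range
 * [RCV_ARRAY, base2_start), i.e. the RcvArray region, so RcvArray
 * entries are presumably written through their own dedicated path
 * rather than through write_csr().
 */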
/**
 * get_csr_addr - return the iomem address for offset
 * @dd - the dd device
 * @offset - the offset of the CSR within bar0
 *
 * Return: The iomem address to use in subsequent
 * writeq/readq operations.
 */
void __iomem *get_csr_addr(
	const struct hfi1_devdata *dd,
	u32 offset)
{
	if (dd->flags & HFI1_PRESENT)
		return hfi1_addr_from_offset(dd, offset);
	return NULL;
}
static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
				 int mode, u64 value)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = read_csr(dd, csr);
	} else if (mode == CNTR_MODE_W) {
		write_csr(dd, csr, value);
		ret = value;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
	return ret;
}
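/*
 * Usage sketch (editor's illustration): a counter accessor reads a CSR
 * in read mode, ignoring 'data':
 *
 *	val = read_write_csr(dd, entry->csr, CNTR_MODE_R, 0);
 *
 * and writes 'data' through in write mode:
 *
 *	read_write_csr(dd, entry->csr, CNTR_MODE_W, data);
 */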
static u64 dev_access_u32_csr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_SDMA) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 0x100 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	return read_write_csr(dd, csr, mode, data);
}
static u64 access_sde_err_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].err_cnt;
	return 0;
}

static u64 access_sde_int_cnt(const struct cntr_entry *entry,
			      void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].sdma_int_cnt;
	return 0;
}

static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
				   void *context, int idx, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].idle_int_cnt;
	return 0;
}

static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
				       void *context, int idx, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	if (dd->per_sdma && idx < dd->num_sdma)
		return dd->per_sdma[idx].progress_int_cnt;
	return 0;
}
static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u64 val = 0;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;	/* one 64-bit counter per VL */
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}

	val = read_write_csr(dd, csr, mode, data);
	return val;
}
static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
			      int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;
	u32 csr = entry->csr;
	int ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;
	if (mode == CNTR_MODE_R)
		ret = read_lcb_csr(dd, csr, &data);
	else if (mode == CNTR_MODE_W)
		ret = write_lcb_csr(dd, csr, data);

	if (ret) {
		dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
		return 0;
	}

	hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
	return data;
}
static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
			       int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_csr(ppd->dd, entry->csr, mode, data);
}

static u64 port_access_u64_csr(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;
	u64 val;
	u64 csr = entry->csr;

	if (entry->flags & CNTR_VL) {
		if (vl == CNTR_INVALID_VL)
			return 0;
		csr += 8 * vl;
	} else {
		if (vl != CNTR_INVALID_VL)
			return 0;
	}
	val = read_write_csr(ppd->dd, csr, mode, data);
	return val;
}
/* Software defined */
static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
				u64 data)
{
	u64 ret;

	if (mode == CNTR_MODE_R) {
		ret = *cntr;
	} else if (mode == CNTR_MODE_W) {
		*cntr = data;
		ret = data;
	} else {
		dd_dev_err(dd, "Invalid cntr sw access mode");
		return 0;
	}

	hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);

	return ret;
}
static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
}

static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
				 int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
}

static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;

	if (vl != CNTR_INVALID_VL)
		return 0;
	return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
}
static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
	u64 zero = 0;
	u64 *counter;

	if (vl == CNTR_INVALID_VL)
		counter = &ppd->port_xmit_discards;
	else if (vl >= 0 && vl < C_VL_COUNT)
		counter = &ppd->port_xmit_discards_vl[vl];
	else
		counter = &zero;

	return read_write_sw(ppd->dd, counter, mode, data);
}
static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
			     mode, data);
}

static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
				      void *context, int vl, int mode, u64 data)
{
	struct hfi1_pportdata *ppd = context;

	if (vl != CNTR_INVALID_VL)
		return 0;

	return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
			     mode, data);
}
u64 get_all_cpu_total(u64 __percpu *cntr)
{
	int cpu;
	u64 counter = 0;

	for_each_possible_cpu(cpu)
		counter += *per_cpu_ptr(cntr, cpu);
	return counter;
}
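
/*
 * Illustrative sketch of the per-CPU counter pattern used below: each
 * CPU increments its own slot locklessly and get_all_cpu_total() folds
 * the slots into a single value on read.  The example_cntr name is
 * hypothetical.
 */
#if 0	/* example only */
static u64 example_percpu_total(void)
{
	u64 __percpu *example_cntr = alloc_percpu(u64);
	u64 total = 0;

	if (example_cntr) {
		this_cpu_inc(*example_cntr);	/* hot path: lockless */
		total = get_all_cpu_total(example_cntr);
		free_percpu(example_cntr);
	}
	return total;
}
#endif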
static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
			  u64 __percpu *cntr,
			  int vl, int mode, u64 data)
{
	u64 ret = 0;

	if (vl != CNTR_INVALID_VL)
		return 0;

	if (mode == CNTR_MODE_R) {
		ret = get_all_cpu_total(cntr) - *z_val;
	} else if (mode == CNTR_MODE_W) {
		/* A write can only zero the counter */
		if (data == 0)
			*z_val = get_all_cpu_total(cntr);
		else
			dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
	} else {
		dd_dev_err(dd, "Invalid cntr sw cpu access mode");
		return 0;
	}

	return ret;
}
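
/*
 * Illustrative sketch of the zeroing convention above: a write of 0
 * never touches the per-CPU slots, it only snapshots the current total
 * into the z_* baseline so subsequent reads start from zero.  The
 * z_example/example_cntr names are hypothetical.
 */
#if 0	/* example only */
	cur = read_write_cpu(dd, &z_example, example_cntr,
			     CNTR_INVALID_VL, CNTR_MODE_R, 0);
	read_write_cpu(dd, &z_example, example_cntr,
		       CNTR_INVALID_VL, CNTR_MODE_W, 0);	/* "zero" */
#endif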
static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
			      mode, data);
}

static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
			      mode, data);
}

static u64 access_sw_pio_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_piowait;
}

static u64 access_sw_pio_drain(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->verbs_dev.n_piodrain;
}

static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
			      void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_txwait;
}

static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
			       void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = context;

	return dd->verbs_dev.n_kmem_wait;
}

static u64 access_sw_send_schedule(const struct cntr_entry *entry,
				   void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
			      mode, data);
}
/* Software counters for the error status bits within MISC_ERR_STATUS */
static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->misc_err_status_cnt[12];
}
1723 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1724 void *context, int vl, int mode,
1727 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1729 return dd->misc_err_status_cnt[11];
1732 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1733 void *context, int vl, int mode,
1736 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1738 return dd->misc_err_status_cnt[10];
1741 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1742 void *context, int vl,
1745 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1747 return dd->misc_err_status_cnt[9];
1750 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1751 void *context, int vl, int mode,
1754 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1756 return dd->misc_err_status_cnt[8];
1759 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1760 const struct cntr_entry *entry,
1761 void *context, int vl, int mode, u64 data)
1763 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1765 return dd->misc_err_status_cnt[7];
1768 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1769 void *context, int vl,
1772 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1774 return dd->misc_err_status_cnt[6];
1777 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1778 void *context, int vl, int mode,
1781 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1783 return dd->misc_err_status_cnt[5];
1786 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1787 void *context, int vl, int mode,
1790 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1792 return dd->misc_err_status_cnt[4];
1795 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1796 void *context, int vl,
1799 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1801 return dd->misc_err_status_cnt[3];
1804 static u64 access_misc_csr_write_bad_addr_err_cnt(
1805 const struct cntr_entry *entry,
1806 void *context, int vl, int mode, u64 data)
1808 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1810 return dd->misc_err_status_cnt[2];
1813 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1814 void *context, int vl,
1817 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1819 return dd->misc_err_status_cnt[1];
1822 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1823 void *context, int vl, int mode,
1826 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1828 return dd->misc_err_status_cnt[0];
/*
 * Software counter for the aggregate of
 * individual CceErrStatus counters
 */
static u64 access_sw_cce_err_status_aggregated_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_cce_err_status_aggregate;
}
/*
 * Software counters corresponding to each of the
 * error status bits within CceErrStatus
 */
static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->cce_err_status_cnt[40];
}
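
/*
 * Illustrative sketch (not driver code) of the convention behind the
 * accessors that follow: bit N of an error-status CSR maps to element
 * N of the matching software counter array, so an error handler can
 * walk a captured status mask and bump the corresponding counters.
 * The helper name is hypothetical.
 */
#if 0	/* example only */
static void example_count_cce_errs(struct hfi1_devdata *dd, u64 status)
{
	int bit;

	for_each_set_bit(bit, (unsigned long *)&status, 64)
		dd->cce_err_status_cnt[bit]++;
}
#endif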
1857 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1858 void *context, int vl, int mode,
1861 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1863 return dd->cce_err_status_cnt[39];
1866 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1867 void *context, int vl, int mode,
1870 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1872 return dd->cce_err_status_cnt[38];
1875 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1876 void *context, int vl, int mode,
1879 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1881 return dd->cce_err_status_cnt[37];
1884 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1885 void *context, int vl, int mode,
1888 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1890 return dd->cce_err_status_cnt[36];
1893 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1894 const struct cntr_entry *entry,
1895 void *context, int vl, int mode, u64 data)
1897 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1899 return dd->cce_err_status_cnt[35];
1902 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1903 const struct cntr_entry *entry,
1904 void *context, int vl, int mode, u64 data)
1906 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1908 return dd->cce_err_status_cnt[34];
1911 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1912 void *context, int vl,
1915 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1917 return dd->cce_err_status_cnt[33];
1920 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1921 void *context, int vl, int mode,
1924 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1926 return dd->cce_err_status_cnt[32];
1929 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1930 void *context, int vl, int mode, u64 data)
1932 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1934 return dd->cce_err_status_cnt[31];
1937 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1938 void *context, int vl, int mode,
1941 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1943 return dd->cce_err_status_cnt[30];
1946 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1947 void *context, int vl, int mode,
1950 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1952 return dd->cce_err_status_cnt[29];
1955 static u64 access_pcic_transmit_back_parity_err_cnt(
1956 const struct cntr_entry *entry,
1957 void *context, int vl, int mode, u64 data)
1959 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1961 return dd->cce_err_status_cnt[28];
1964 static u64 access_pcic_transmit_front_parity_err_cnt(
1965 const struct cntr_entry *entry,
1966 void *context, int vl, int mode, u64 data)
1968 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1970 return dd->cce_err_status_cnt[27];
1973 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1974 void *context, int vl, int mode,
1977 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1979 return dd->cce_err_status_cnt[26];
1982 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1983 void *context, int vl, int mode,
1986 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1988 return dd->cce_err_status_cnt[25];
1991 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1992 void *context, int vl, int mode,
1995 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1997 return dd->cce_err_status_cnt[24];
2000 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2001 void *context, int vl, int mode,
2004 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2006 return dd->cce_err_status_cnt[23];
2009 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2010 void *context, int vl,
2013 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2015 return dd->cce_err_status_cnt[22];
2018 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2019 void *context, int vl, int mode,
2022 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2024 return dd->cce_err_status_cnt[21];
2027 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2028 const struct cntr_entry *entry,
2029 void *context, int vl, int mode, u64 data)
2031 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2033 return dd->cce_err_status_cnt[20];
2036 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2037 void *context, int vl,
2040 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2042 return dd->cce_err_status_cnt[19];
2045 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2046 void *context, int vl, int mode,
2049 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2051 return dd->cce_err_status_cnt[18];
2054 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2055 void *context, int vl, int mode,
2058 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2060 return dd->cce_err_status_cnt[17];
2063 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2064 void *context, int vl, int mode,
2067 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2069 return dd->cce_err_status_cnt[16];
2072 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2073 void *context, int vl, int mode,
2076 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2078 return dd->cce_err_status_cnt[15];
2081 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2082 void *context, int vl,
2085 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2087 return dd->cce_err_status_cnt[14];
2090 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2091 void *context, int vl, int mode,
2094 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2096 return dd->cce_err_status_cnt[13];
2099 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2100 const struct cntr_entry *entry,
2101 void *context, int vl, int mode, u64 data)
2103 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2105 return dd->cce_err_status_cnt[12];
2108 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2109 const struct cntr_entry *entry,
2110 void *context, int vl, int mode, u64 data)
2112 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2114 return dd->cce_err_status_cnt[11];
2117 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2118 const struct cntr_entry *entry,
2119 void *context, int vl, int mode, u64 data)
2121 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2123 return dd->cce_err_status_cnt[10];
2126 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2127 const struct cntr_entry *entry,
2128 void *context, int vl, int mode, u64 data)
2130 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2132 return dd->cce_err_status_cnt[9];
2135 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2136 const struct cntr_entry *entry,
2137 void *context, int vl, int mode, u64 data)
2139 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2141 return dd->cce_err_status_cnt[8];
2144 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2145 void *context, int vl,
2148 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2150 return dd->cce_err_status_cnt[7];
2153 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2154 const struct cntr_entry *entry,
2155 void *context, int vl, int mode, u64 data)
2157 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2159 return dd->cce_err_status_cnt[6];
2162 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2163 void *context, int vl, int mode,
2166 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2168 return dd->cce_err_status_cnt[5];
2171 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2172 void *context, int vl, int mode,
2175 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2177 return dd->cce_err_status_cnt[4];
2180 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2181 const struct cntr_entry *entry,
2182 void *context, int vl, int mode, u64 data)
2184 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2186 return dd->cce_err_status_cnt[3];
2189 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2190 void *context, int vl,
2193 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2195 return dd->cce_err_status_cnt[2];
2198 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2199 void *context, int vl,
2202 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2204 return dd->cce_err_status_cnt[1];
2207 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2208 void *context, int vl, int mode,
2211 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2213 return dd->cce_err_status_cnt[0];
/*
 * Software counters corresponding to each of the
 * error status bits within RcvErrStatus
 */
static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->rcv_err_status_cnt[63];
}
2229 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2230 void *context, int vl,
2233 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2235 return dd->rcv_err_status_cnt[62];
2238 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2239 void *context, int vl, int mode,
2242 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2244 return dd->rcv_err_status_cnt[61];
2247 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2248 void *context, int vl, int mode,
2251 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2253 return dd->rcv_err_status_cnt[60];
2256 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2257 void *context, int vl,
2260 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2262 return dd->rcv_err_status_cnt[59];
2265 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2266 void *context, int vl,
2269 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2271 return dd->rcv_err_status_cnt[58];
2274 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2275 void *context, int vl, int mode,
2278 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2280 return dd->rcv_err_status_cnt[57];
2283 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2284 void *context, int vl, int mode,
2287 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2289 return dd->rcv_err_status_cnt[56];
2292 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2293 void *context, int vl, int mode,
2296 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2298 return dd->rcv_err_status_cnt[55];
2301 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2302 const struct cntr_entry *entry,
2303 void *context, int vl, int mode, u64 data)
2305 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2307 return dd->rcv_err_status_cnt[54];
2310 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2311 const struct cntr_entry *entry,
2312 void *context, int vl, int mode, u64 data)
2314 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2316 return dd->rcv_err_status_cnt[53];
2319 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2320 void *context, int vl,
2323 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2325 return dd->rcv_err_status_cnt[52];
2328 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2329 void *context, int vl,
2332 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2334 return dd->rcv_err_status_cnt[51];
2337 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2338 void *context, int vl,
2341 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2343 return dd->rcv_err_status_cnt[50];
2346 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2347 void *context, int vl,
2350 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2352 return dd->rcv_err_status_cnt[49];
2355 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2356 void *context, int vl,
2359 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2361 return dd->rcv_err_status_cnt[48];
2364 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2365 void *context, int vl,
2368 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2370 return dd->rcv_err_status_cnt[47];
2373 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2374 void *context, int vl, int mode,
2377 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2379 return dd->rcv_err_status_cnt[46];
2382 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2383 const struct cntr_entry *entry,
2384 void *context, int vl, int mode, u64 data)
2386 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2388 return dd->rcv_err_status_cnt[45];
2391 static u64 access_rx_lookup_csr_parity_err_cnt(
2392 const struct cntr_entry *entry,
2393 void *context, int vl, int mode, u64 data)
2395 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2397 return dd->rcv_err_status_cnt[44];
2400 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2401 const struct cntr_entry *entry,
2402 void *context, int vl, int mode, u64 data)
2404 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2406 return dd->rcv_err_status_cnt[43];
2409 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2410 const struct cntr_entry *entry,
2411 void *context, int vl, int mode, u64 data)
2413 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2415 return dd->rcv_err_status_cnt[42];
2418 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2419 const struct cntr_entry *entry,
2420 void *context, int vl, int mode, u64 data)
2422 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2424 return dd->rcv_err_status_cnt[41];
2427 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2428 const struct cntr_entry *entry,
2429 void *context, int vl, int mode, u64 data)
2431 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2433 return dd->rcv_err_status_cnt[40];
2436 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2437 const struct cntr_entry *entry,
2438 void *context, int vl, int mode, u64 data)
2440 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2442 return dd->rcv_err_status_cnt[39];
2445 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2446 const struct cntr_entry *entry,
2447 void *context, int vl, int mode, u64 data)
2449 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2451 return dd->rcv_err_status_cnt[38];
2454 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2455 const struct cntr_entry *entry,
2456 void *context, int vl, int mode, u64 data)
2458 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2460 return dd->rcv_err_status_cnt[37];
2463 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2464 const struct cntr_entry *entry,
2465 void *context, int vl, int mode, u64 data)
2467 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2469 return dd->rcv_err_status_cnt[36];
2472 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2473 const struct cntr_entry *entry,
2474 void *context, int vl, int mode, u64 data)
2476 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2478 return dd->rcv_err_status_cnt[35];
2481 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2482 const struct cntr_entry *entry,
2483 void *context, int vl, int mode, u64 data)
2485 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2487 return dd->rcv_err_status_cnt[34];
2490 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2491 const struct cntr_entry *entry,
2492 void *context, int vl, int mode, u64 data)
2494 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2496 return dd->rcv_err_status_cnt[33];
2499 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2500 void *context, int vl, int mode,
2503 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2505 return dd->rcv_err_status_cnt[32];
2508 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2509 void *context, int vl, int mode,
2512 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2514 return dd->rcv_err_status_cnt[31];
2517 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2518 void *context, int vl, int mode,
2521 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2523 return dd->rcv_err_status_cnt[30];
2526 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2527 void *context, int vl, int mode,
2530 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2532 return dd->rcv_err_status_cnt[29];
2535 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2536 void *context, int vl,
2539 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2541 return dd->rcv_err_status_cnt[28];
2544 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2545 const struct cntr_entry *entry,
2546 void *context, int vl, int mode, u64 data)
2548 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2550 return dd->rcv_err_status_cnt[27];
2553 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2554 const struct cntr_entry *entry,
2555 void *context, int vl, int mode, u64 data)
2557 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2559 return dd->rcv_err_status_cnt[26];
2562 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2563 const struct cntr_entry *entry,
2564 void *context, int vl, int mode, u64 data)
2566 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2568 return dd->rcv_err_status_cnt[25];
2571 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2572 const struct cntr_entry *entry,
2573 void *context, int vl, int mode, u64 data)
2575 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2577 return dd->rcv_err_status_cnt[24];
2580 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2581 const struct cntr_entry *entry,
2582 void *context, int vl, int mode, u64 data)
2584 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2586 return dd->rcv_err_status_cnt[23];
2589 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2590 const struct cntr_entry *entry,
2591 void *context, int vl, int mode, u64 data)
2593 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2595 return dd->rcv_err_status_cnt[22];
2598 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2599 const struct cntr_entry *entry,
2600 void *context, int vl, int mode, u64 data)
2602 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2604 return dd->rcv_err_status_cnt[21];
2607 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2608 const struct cntr_entry *entry,
2609 void *context, int vl, int mode, u64 data)
2611 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2613 return dd->rcv_err_status_cnt[20];
2616 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2617 const struct cntr_entry *entry,
2618 void *context, int vl, int mode, u64 data)
2620 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2622 return dd->rcv_err_status_cnt[19];
2625 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2626 void *context, int vl,
2629 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2631 return dd->rcv_err_status_cnt[18];
2634 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2635 void *context, int vl,
2638 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2640 return dd->rcv_err_status_cnt[17];
2643 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2644 const struct cntr_entry *entry,
2645 void *context, int vl, int mode, u64 data)
2647 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2649 return dd->rcv_err_status_cnt[16];
2652 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2653 const struct cntr_entry *entry,
2654 void *context, int vl, int mode, u64 data)
2656 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2658 return dd->rcv_err_status_cnt[15];
2661 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2662 void *context, int vl,
2665 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2667 return dd->rcv_err_status_cnt[14];
2670 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2671 void *context, int vl,
2674 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2676 return dd->rcv_err_status_cnt[13];
2679 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2680 void *context, int vl, int mode,
2683 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2685 return dd->rcv_err_status_cnt[12];
2688 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2689 void *context, int vl, int mode,
2692 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2694 return dd->rcv_err_status_cnt[11];
2697 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2698 void *context, int vl, int mode,
2701 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2703 return dd->rcv_err_status_cnt[10];
2706 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2707 void *context, int vl, int mode,
2710 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2712 return dd->rcv_err_status_cnt[9];
2715 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2716 void *context, int vl, int mode,
2719 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2721 return dd->rcv_err_status_cnt[8];
2724 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2725 const struct cntr_entry *entry,
2726 void *context, int vl, int mode, u64 data)
2728 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2730 return dd->rcv_err_status_cnt[7];
2733 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2734 const struct cntr_entry *entry,
2735 void *context, int vl, int mode, u64 data)
2737 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2739 return dd->rcv_err_status_cnt[6];
2742 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2743 void *context, int vl, int mode,
2746 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2748 return dd->rcv_err_status_cnt[5];
2751 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2752 void *context, int vl, int mode,
2755 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2757 return dd->rcv_err_status_cnt[4];
2760 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2761 void *context, int vl, int mode,
2764 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2766 return dd->rcv_err_status_cnt[3];
2769 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2770 void *context, int vl, int mode,
2773 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2775 return dd->rcv_err_status_cnt[2];
2778 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2779 void *context, int vl, int mode,
2782 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2784 return dd->rcv_err_status_cnt[1];
2787 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2788 void *context, int vl, int mode,
2791 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2793 return dd->rcv_err_status_cnt[0];
/*
 * Software counters corresponding to each of the
 * error status bits within SendPioErrStatus
 */
static u64 access_pio_pec_sop_head_parity_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_pio_err_status_cnt[35];
}
2809 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2810 const struct cntr_entry *entry,
2811 void *context, int vl, int mode, u64 data)
2813 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2815 return dd->send_pio_err_status_cnt[34];
2818 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2819 const struct cntr_entry *entry,
2820 void *context, int vl, int mode, u64 data)
2822 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2824 return dd->send_pio_err_status_cnt[33];
2827 static u64 access_pio_current_free_cnt_parity_err_cnt(
2828 const struct cntr_entry *entry,
2829 void *context, int vl, int mode, u64 data)
2831 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2833 return dd->send_pio_err_status_cnt[32];
2836 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2837 void *context, int vl, int mode,
2840 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2842 return dd->send_pio_err_status_cnt[31];
2845 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2846 void *context, int vl, int mode,
2849 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2851 return dd->send_pio_err_status_cnt[30];
2854 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2855 void *context, int vl, int mode,
2858 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2860 return dd->send_pio_err_status_cnt[29];
2863 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2864 const struct cntr_entry *entry,
2865 void *context, int vl, int mode, u64 data)
2867 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2869 return dd->send_pio_err_status_cnt[28];
2872 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2873 void *context, int vl, int mode,
2876 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2878 return dd->send_pio_err_status_cnt[27];
2881 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2882 void *context, int vl, int mode,
2885 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2887 return dd->send_pio_err_status_cnt[26];
2890 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2891 void *context, int vl,
2894 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2896 return dd->send_pio_err_status_cnt[25];
2899 static u64 access_pio_block_qw_count_parity_err_cnt(
2900 const struct cntr_entry *entry,
2901 void *context, int vl, int mode, u64 data)
2903 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2905 return dd->send_pio_err_status_cnt[24];
2908 static u64 access_pio_write_qw_valid_parity_err_cnt(
2909 const struct cntr_entry *entry,
2910 void *context, int vl, int mode, u64 data)
2912 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2914 return dd->send_pio_err_status_cnt[23];
2917 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2918 void *context, int vl, int mode,
2921 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2923 return dd->send_pio_err_status_cnt[22];
2926 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2927 void *context, int vl,
2930 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2932 return dd->send_pio_err_status_cnt[21];
2935 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2936 void *context, int vl,
2939 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2941 return dd->send_pio_err_status_cnt[20];
2944 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2945 void *context, int vl,
2948 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2950 return dd->send_pio_err_status_cnt[19];
2953 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2954 const struct cntr_entry *entry,
2955 void *context, int vl, int mode, u64 data)
2957 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2959 return dd->send_pio_err_status_cnt[18];
2962 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2963 void *context, int vl, int mode,
2966 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2968 return dd->send_pio_err_status_cnt[17];
2971 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2972 void *context, int vl, int mode,
2975 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2977 return dd->send_pio_err_status_cnt[16];
2980 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2981 const struct cntr_entry *entry,
2982 void *context, int vl, int mode, u64 data)
2984 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2986 return dd->send_pio_err_status_cnt[15];
2989 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2990 const struct cntr_entry *entry,
2991 void *context, int vl, int mode, u64 data)
2993 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2995 return dd->send_pio_err_status_cnt[14];
2998 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2999 const struct cntr_entry *entry,
3000 void *context, int vl, int mode, u64 data)
3002 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3004 return dd->send_pio_err_status_cnt[13];
3007 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3008 const struct cntr_entry *entry,
3009 void *context, int vl, int mode, u64 data)
3011 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3013 return dd->send_pio_err_status_cnt[12];
3016 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3017 const struct cntr_entry *entry,
3018 void *context, int vl, int mode, u64 data)
3020 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3022 return dd->send_pio_err_status_cnt[11];
3025 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3026 const struct cntr_entry *entry,
3027 void *context, int vl, int mode, u64 data)
3029 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3031 return dd->send_pio_err_status_cnt[10];
3034 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3035 const struct cntr_entry *entry,
3036 void *context, int vl, int mode, u64 data)
3038 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3040 return dd->send_pio_err_status_cnt[9];
3043 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3044 const struct cntr_entry *entry,
3045 void *context, int vl, int mode, u64 data)
3047 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3049 return dd->send_pio_err_status_cnt[8];
3052 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3053 const struct cntr_entry *entry,
3054 void *context, int vl, int mode, u64 data)
3056 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3058 return dd->send_pio_err_status_cnt[7];
3061 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3062 void *context, int vl, int mode,
3065 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3067 return dd->send_pio_err_status_cnt[6];
3070 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3071 void *context, int vl, int mode,
3074 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3076 return dd->send_pio_err_status_cnt[5];
3079 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3080 void *context, int vl, int mode,
3083 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3085 return dd->send_pio_err_status_cnt[4];
3088 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3089 void *context, int vl, int mode,
3092 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3094 return dd->send_pio_err_status_cnt[3];
3097 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3098 void *context, int vl, int mode,
3101 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3103 return dd->send_pio_err_status_cnt[2];
3106 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3107 void *context, int vl,
3110 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3112 return dd->send_pio_err_status_cnt[1];
3115 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3116 void *context, int vl, int mode,
3119 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3121 return dd->send_pio_err_status_cnt[0];
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaErrStatus
 */
static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_dma_err_status_cnt[3];
}
3137 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3138 const struct cntr_entry *entry,
3139 void *context, int vl, int mode, u64 data)
3141 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3143 return dd->send_dma_err_status_cnt[2];
3146 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3147 void *context, int vl, int mode,
3150 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3152 return dd->send_dma_err_status_cnt[1];
3155 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3156 void *context, int vl, int mode,
3159 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3161 return dd->send_dma_err_status_cnt[0];
/*
 * Software counters corresponding to each of the
 * error status bits within SendEgressErrStatus
 */
static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
	const struct cntr_entry *entry,
	void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[63];
}
3177 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3178 const struct cntr_entry *entry,
3179 void *context, int vl, int mode, u64 data)
3181 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3183 return dd->send_egress_err_status_cnt[62];
3186 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3187 void *context, int vl, int mode,
3190 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3192 return dd->send_egress_err_status_cnt[61];
3195 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3196 void *context, int vl,
3199 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3201 return dd->send_egress_err_status_cnt[60];
3204 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3205 const struct cntr_entry *entry,
3206 void *context, int vl, int mode, u64 data)
3208 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3210 return dd->send_egress_err_status_cnt[59];
3213 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3214 void *context, int vl, int mode,
3217 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3219 return dd->send_egress_err_status_cnt[58];
3222 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3223 void *context, int vl, int mode,
3226 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3228 return dd->send_egress_err_status_cnt[57];
3231 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3232 void *context, int vl, int mode,
3235 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3237 return dd->send_egress_err_status_cnt[56];
3240 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3241 void *context, int vl, int mode,
3244 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3246 return dd->send_egress_err_status_cnt[55];
3249 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3250 void *context, int vl, int mode,
3253 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3255 return dd->send_egress_err_status_cnt[54];
3258 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3259 void *context, int vl, int mode,
3262 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3264 return dd->send_egress_err_status_cnt[53];
3267 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3268 void *context, int vl, int mode,
3271 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3273 return dd->send_egress_err_status_cnt[52];
3276 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3277 void *context, int vl, int mode,
3280 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3282 return dd->send_egress_err_status_cnt[51];
3285 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3286 void *context, int vl, int mode,
3289 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3291 return dd->send_egress_err_status_cnt[50];
3294 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3295 void *context, int vl, int mode,
3298 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3300 return dd->send_egress_err_status_cnt[49];
3303 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3304 void *context, int vl, int mode,
3307 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3309 return dd->send_egress_err_status_cnt[48];
3312 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3313 void *context, int vl, int mode,
3316 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3318 return dd->send_egress_err_status_cnt[47];
3321 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3322 void *context, int vl, int mode,
3325 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3327 return dd->send_egress_err_status_cnt[46];
3330 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3331 void *context, int vl, int mode,
3334 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3336 return dd->send_egress_err_status_cnt[45];
3339 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3340 void *context, int vl,
3343 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3345 return dd->send_egress_err_status_cnt[44];
3348 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3349 const struct cntr_entry *entry,
3350 void *context, int vl, int mode, u64 data)
3352 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3354 return dd->send_egress_err_status_cnt[43];
3357 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3358 void *context, int vl, int mode,
3361 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3363 return dd->send_egress_err_status_cnt[42];
3366 static u64 access_tx_credit_return_partiy_err_cnt(
3367 const struct cntr_entry *entry,
3368 void *context, int vl, int mode, u64 data)
3370 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3372 return dd->send_egress_err_status_cnt[41];
3375 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3376 const struct cntr_entry *entry,
3377 void *context, int vl, int mode, u64 data)
3379 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3381 return dd->send_egress_err_status_cnt[40];
3384 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3385 const struct cntr_entry *entry,
3386 void *context, int vl, int mode, u64 data)
3388 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3390 return dd->send_egress_err_status_cnt[39];
3393 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3394 const struct cntr_entry *entry,
3395 void *context, int vl, int mode, u64 data)
3397 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3399 return dd->send_egress_err_status_cnt[38];
3402 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3403 const struct cntr_entry *entry,
3404 void *context, int vl, int mode, u64 data)
3406 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3408 return dd->send_egress_err_status_cnt[37];
3411 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3412 const struct cntr_entry *entry,
3413 void *context, int vl, int mode, u64 data)
3415 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3417 return dd->send_egress_err_status_cnt[36];
3420 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3421 const struct cntr_entry *entry,
3422 void *context, int vl, int mode, u64 data)
3424 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3426 return dd->send_egress_err_status_cnt[35];
3429 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3430 const struct cntr_entry *entry,
3431 void *context, int vl, int mode, u64 data)
3433 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3435 return dd->send_egress_err_status_cnt[34];
3438 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3439 const struct cntr_entry *entry,
3440 void *context, int vl, int mode, u64 data)
3442 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3444 return dd->send_egress_err_status_cnt[33];
3447 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3448 const struct cntr_entry *entry,
3449 void *context, int vl, int mode, u64 data)
3451 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3453 return dd->send_egress_err_status_cnt[32];
3456 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3457 const struct cntr_entry *entry,
3458 void *context, int vl, int mode, u64 data)
3460 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3462 return dd->send_egress_err_status_cnt[31];
3465 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3466 const struct cntr_entry *entry,
3467 void *context, int vl, int mode, u64 data)
3469 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3471 return dd->send_egress_err_status_cnt[30];
3474 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3475 const struct cntr_entry *entry,
3476 void *context, int vl, int mode, u64 data)
3478 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3480 return dd->send_egress_err_status_cnt[29];
3483 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3484 const struct cntr_entry *entry,
3485 void *context, int vl, int mode, u64 data)
3487 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3489 return dd->send_egress_err_status_cnt[28];
3492 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3493 const struct cntr_entry *entry,
3494 void *context, int vl, int mode, u64 data)
3496 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3498 return dd->send_egress_err_status_cnt[27];
3501 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3502 const struct cntr_entry *entry,
3503 void *context, int vl, int mode, u64 data)
3505 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3507 return dd->send_egress_err_status_cnt[26];
3510 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3511 const struct cntr_entry *entry,
3512 void *context, int vl, int mode, u64 data)
3514 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3516 return dd->send_egress_err_status_cnt[25];
3519 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3520 const struct cntr_entry *entry,
3521 void *context, int vl, int mode, u64 data)
3523 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3525 return dd->send_egress_err_status_cnt[24];
3528 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3529 const struct cntr_entry *entry,
3530 void *context, int vl, int mode, u64 data)
3532 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3534 return dd->send_egress_err_status_cnt[23];
3537 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3538 const struct cntr_entry *entry,
3539 void *context, int vl, int mode, u64 data)
3541 struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
	return dd->send_egress_err_status_cnt[22];
}

static u64 access_tx_sdma5_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[21];
}

static u64 access_tx_sdma4_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[20];
}

static u64 access_tx_sdma3_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[19];
}

static u64 access_tx_sdma2_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[18];
}

static u64 access_tx_sdma1_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[17];
}

static u64 access_tx_sdma0_disallowed_packet_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[16];
}

static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
					   void *context, int vl, int mode,
					   u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[15];
}

static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[14];
}

static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[13];
}

static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[12];
}

static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[11];
}

static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[10];
}

static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[9];
}

static u64 access_tx_sdma_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[8];
}

static u64 access_tx_pio_launch_intf_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[7];
}

static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[6];
}

static u64 access_tx_incorrect_link_state_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[5];
}

static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[4];
}

static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[3];
}

static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[2];
}

static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[1];
}

static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_egress_err_status_cnt[0];
}
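/*
 * Editorial note (inference from the pattern above, not in the original):
 * each accessor shadows one bit of the SendEgressErrStatus CSR, and the
 * index into send_egress_err_status_cnt[] matches that bit position, so
 * hardware bit N is counted in element N.
 */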
/*
 * Software counters corresponding to each of the
 * error status bits within SendErrStatus
 */
static u64 access_send_csr_write_bad_addr_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[2];
}

static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
						 void *context, int vl,
						 int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[1];
}

static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
				      void *context, int vl, int mode,
				      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->send_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendCtxtErrStatus
 */
static u64 access_pio_write_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[4];
}

static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[3];
}

static u64 access_pio_write_crosses_boundary_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[2];
}

static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
						void *context, int vl,
						int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[1];
}

static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl, int mode,
					       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_ctxt_err_status_cnt[0];
}
/*
 * Software counters corresponding to each of the
 * error status bits within SendDmaEngErrStatus
 */
static u64 access_sdma_header_request_fifo_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[23];
}

static u64 access_sdma_header_storage_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[22];
}

static u64 access_sdma_packet_tracking_cor_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[21];
}

static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[20];
}

static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[19];
}

static u64 access_sdma_header_request_fifo_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[18];
}

static u64 access_sdma_header_storage_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[17];
}

static u64 access_sdma_packet_tracking_unc_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[16];
}

static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[15];
}

static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[14];
}

static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
				       void *context, int vl, int mode,
				       u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[13];
}

static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[12];
}

static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
					      void *context, int vl, int mode,
					      u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[11];
}

static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
					     void *context, int vl, int mode,
					     u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[10];
}

static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[9];
}

static u64 access_sdma_packet_desc_overflow_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[8];
}

static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
					       void *context, int vl,
					       int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[7];
}

static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
				    void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[6];
}

static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[5];
}

static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
					  void *context, int vl, int mode,
					  u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[4];
}

static u64 access_sdma_tail_out_of_bounds_err_cnt(
				const struct cntr_entry *entry,
				void *context, int vl, int mode, u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[3];
}

static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[2];
}

static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
					    void *context, int vl, int mode,
					    u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[1];
}

static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
					void *context, int vl, int mode,
					u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	return dd->sw_send_dma_eng_err_status_cnt[0];
}
static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
				 void *context, int vl, int mode,
				 u64 data)
{
	struct hfi1_devdata *dd = (struct hfi1_devdata *)context;

	u64 val = 0;
	u64 csr = entry->csr;

	val = read_write_csr(dd, csr, mode, data);
	if (mode == CNTR_MODE_R) {
		/* fold the software bypass-packet error count into the CSR
		 * value, saturating at CNTR_MAX instead of wrapping
		 */
		val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
			CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
	} else if (mode == CNTR_MODE_W) {
		dd->sw_rcv_bypass_packet_errors = 0;
	} else {
		dd_dev_err(dd, "Invalid cntr register access mode");
		return 0;
	}
	return val;
}
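/*
 * Editorial worked example (illustrative only, not from the source): with
 * mode == CNTR_MODE_R above, if the CSR read returns CNTR_MAX - 10 while
 * sw_rcv_bypass_packet_errors is 25, the sum would exceed CNTR_MAX, so the
 * reported value is clamped to CNTR_MAX rather than overflowing.
 */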
#define def_access_sw_cpu(cntr) \
static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry, \
				void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
	return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr, \
			      ppd->ibport_data.rvp.cntr, vl, \
			      mode, data); \
}

def_access_sw_cpu(rc_acks);
def_access_sw_cpu(rc_qacks);
def_access_sw_cpu(rc_delayed_comp);
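/*
 * Editorial example (a sketch of the expansion, for readability): the
 * instantiation def_access_sw_cpu(rc_acks) above defines
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data);
 *
 * which forwards to read_write_cpu() with the per-port zero baseline
 * (rvp.z_rc_acks) and the live per-CPU counter (rvp.rc_acks).
 */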
#define def_access_ibp_counter(cntr) \
static u64 access_ibp_##cntr(const struct cntr_entry *entry, \
			     void *context, int vl, int mode, u64 data) \
{ \
	struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context; \
	if (vl != CNTR_INVALID_VL) \
		return 0; \
	return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr, \
			     mode, data); \
}

def_access_ibp_counter(loop_pkts);
def_access_ibp_counter(rc_resends);
def_access_ibp_counter(rnr_naks);
def_access_ibp_counter(other_naks);
def_access_ibp_counter(rc_timeouts);
def_access_ibp_counter(pkt_drops);
def_access_ibp_counter(dmawait);
def_access_ibp_counter(rc_seqnak);
def_access_ibp_counter(rc_dupreq);
def_access_ibp_counter(rdma_seq);
def_access_ibp_counter(unaligned);
def_access_ibp_counter(seq_naks);
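/*
 * Editorial note (inference, not in the original): unlike the per-CPU
 * counters generated by def_access_sw_cpu() above, these IB protocol
 * counters are not kept per VL, so any request for a real VL
 * (vl != CNTR_INVALID_VL) reads back as 0.  For example,
 * def_access_ibp_counter(rc_resends) yields access_ibp_rc_resends(),
 * which services ppd->ibport_data.rvp.n_rc_resends.
 */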
static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
[C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
[C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
[C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
[C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
[C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
			CNTR_NORMAL),
[C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
			RCV_TID_FLOW_GEN_MISMATCH_CNT,
			CNTR_NORMAL),
[C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
			CNTR_NORMAL),
[C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
			RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
[C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
			CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
[C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
			CNTR_NORMAL),
[C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
			CNTR_NORMAL),
[C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
			CNTR_NORMAL),
[C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
			CNTR_NORMAL),
[C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
			CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
[C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
			CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
[C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
			access_dc_rcv_err_cnt),
[C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
			CNTR_SYNTH),
[C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
			CNTR_SYNTH),
[C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
			CNTR_SYNTH),
[C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
			DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
[C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
			DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
			DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
[C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
			CNTR_SYNTH),
[C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
			CNTR_SYNTH),
[C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
			CNTR_SYNTH),
[C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
[C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
[C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
			CNTR_SYNTH),
[C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
			CNTR_SYNTH),
[C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_DC_TOTAL_CRC] =
	DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
			 CNTR_SYNTH),
[C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
			CNTR_SYNTH),
[C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
			CNTR_SYNTH),
[C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
			CNTR_SYNTH),
[C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
			CNTR_SYNTH),
[C_DC_CRC_MULT_LN] =
	DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
			 CNTR_SYNTH),
[C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
			CNTR_SYNTH),
[C_DC_SEQ_CRC_CNT] =
	DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_ONLY_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS1_CNT] =
	DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
			 CNTR_SYNTH),
[C_DC_ESC0_PLUS2_CNT] =
	DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
			 CNTR_SYNTH),
[C_DC_REINIT_FROM_PEER_CNT] =
	DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
			 CNTR_SYNTH),
[C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
			CNTR_SYNTH),
[C_DC_MISC_FLG_CNT] =
	DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_GOOD_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
[C_DC_PRF_ACCEPTED_LTP_CNT] =
	DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
			 CNTR_SYNTH),
[C_DC_PRF_RX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_TX_FLIT_CNT] =
	DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
[C_DC_PRF_CLK_CNTR] =
	DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
[C_DC_PG_DBG_FLIT_CRDTS_CNT] =
	DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
[C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
	DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
			 CNTR_SYNTH),
[C_DC_PG_STS_TX_SBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
[C_DC_PG_STS_TX_MBE_CNT] =
	DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
			 CNTR_SYNTH),
[C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_intr),
[C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
			    access_sw_cpu_rcv_limit),
[C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
			    access_sw_vtx_wait),
[C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
			    access_sw_pio_wait),
[C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
			    access_sw_pio_drain),
[C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
			    access_sw_kmem_wait),
[C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
			    hfi1_access_sw_tid_wait),
[C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
			    access_sw_send_schedule),
[C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
			    SEND_DMA_DESC_FETCHED_CNT, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    dev_access_u32_csr),
[C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_int_cnt),
[C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_err_cnt),
[C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_idle_int_cnt),
[C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
			    CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
			    access_sde_progress_int_cnt),
/* MISC_ERR_STATUS */
[C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_pll_lock_fail_err_cnt),
[C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_mbist_fail_err_cnt),
[C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_invalid_eep_cmd_err_cnt),
[C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_efuse_done_parity_err_cnt),
[C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_efuse_write_err_cnt),
[C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
			    0, CNTR_NORMAL,
			    access_misc_efuse_read_bad_addr_err_cnt),
[C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_efuse_csr_parity_err_cnt),
[C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_fw_auth_failed_err_cnt),
[C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_key_mismatch_err_cnt),
[C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_sbus_write_failed_err_cnt),
[C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_csr_write_bad_addr_err_cnt),
[C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_csr_read_bad_addr_err_cnt),
[C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
			    CNTR_NORMAL,
			    access_misc_csr_parity_err_cnt),
/* CceErrStatus */
[C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
			    CNTR_NORMAL,
			    access_sw_cce_err_status_aggregated_cnt),
[C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_msix_csr_parity_err_cnt),
[C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_int_map_unc_err_cnt),
[C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_int_map_cor_err_cnt),
[C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_msix_table_unc_err_cnt),
[C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_msix_table_cor_err_cnt),
[C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_cce_rxdma_conv_fifo_parity_err_cnt),
[C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_cce_rcpl_async_fifo_parity_err_cnt),
[C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_seg_write_bad_addr_err_cnt),
[C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_seg_read_bad_addr_err_cnt),
[C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
			    CNTR_NORMAL,
			    access_la_triggered_cnt),
[C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_trgt_cpl_timeout_err_cnt),
[C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_receive_parity_err_cnt),
[C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_transmit_back_parity_err_cnt),
[C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
			    0, CNTR_NORMAL,
			    access_pcic_transmit_front_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_cpl_dat_q_unc_err_cnt),
[C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_cpl_hd_q_unc_err_cnt),
[C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_post_dat_q_unc_err_cnt),
[C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_post_hd_q_unc_err_cnt),
[C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_retry_sot_mem_unc_err_cnt),
[C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_retry_mem_unc_err),
[C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_n_post_dat_q_parity_err_cnt),
[C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_n_post_h_q_parity_err_cnt),
[C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_cpl_dat_q_cor_err_cnt),
[C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_cpl_hd_q_cor_err_cnt),
[C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_post_dat_q_cor_err_cnt),
[C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_post_hd_q_cor_err_cnt),
[C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_retry_sot_mem_cor_err_cnt),
[C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pcic_retry_mem_cor_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoDbgParityError", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cli1_async_fifo_dbg_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoRxdmaParityError", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cli1_async_fifo_rxdma_parity_err_cnt
			    ),
[C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
[C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
			    "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
[C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_cce_cli2_async_fifo_parity_err_cnt),
[C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_csr_cfg_bus_parity_err_cnt),
[C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_cce_cli0_async_fifo_parity_err_cnt),
[C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_rspd_data_parity_err_cnt),
[C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_trgt_access_err_cnt),
[C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_cce_trgt_async_fifo_parity_err_cnt),
[C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_csr_write_bad_addr_err_cnt),
[C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_cce_csr_read_bad_addr_err_cnt),
[C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_ccs_csr_parity_err_cnt),
/* RcvErrStatus */
[C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_csr_parity_err_cnt),
[C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_csr_write_bad_addr_err_cnt),
[C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_csr_read_bad_addr_err_cnt),
[C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_csr_unc_err_cnt),
[C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_dq_fsm_encoding_err_cnt),
[C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_eq_fsm_encoding_err_cnt),
[C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_csr_parity_err_cnt),
[C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_data_cor_err_cnt),
[C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_data_unc_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_data_fifo_rd_cor_err_cnt),
[C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_data_fifo_rd_unc_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_hdr_fifo_rd_cor_err_cnt),
[C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_hdr_fifo_rd_unc_err_cnt),
[C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part2_cor_err_cnt),
[C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part2_unc_err_cnt),
[C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part1_cor_err_cnt),
[C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_desc_part1_unc_err_cnt),
[C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_hq_intr_fsm_err_cnt),
[C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_hq_intr_csr_parity_err_cnt),
[C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_lookup_csr_parity_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_lookup_rcv_array_cor_err_cnt),
[C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_lookup_rcv_array_unc_err_cnt),
[C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_lookup_des_part2_parity_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_lookup_des_part1_unc_cor_err_cnt),
[C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_lookup_des_part1_unc_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_next_free_buf_cor_err_cnt),
[C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_next_free_buf_unc_err_cnt),
[C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufFlInitWrAddrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rbuf_fl_init_wr_addr_parity_err_cnt),
[C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_fl_initdone_parity_err_cnt),
[C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_fl_write_addr_parity_err_cnt),
[C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_fl_rd_addr_parity_err_cnt),
[C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_empty_err_cnt),
[C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_full_err_cnt),
[C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
			    CNTR_NORMAL,
			    access_rbuf_bad_lookup_err_cnt),
[C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rbuf_ctx_id_parity_err_cnt),
[C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rbuf_csr_qeopdw_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufCsrQNumOfPktParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufCsrQTlPtrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
[C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
[C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
			    0, 0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
[C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
[C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
			    "RxRbufCsrQHeadBufNumParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_block_list_read_cor_err_cnt),
[C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
			    0, CNTR_NORMAL,
			    access_rx_rbuf_block_list_read_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_unc_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
			    "RxRbufLookupDesRegUncCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
[C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_lookup_des_reg_unc_err_cnt),
[C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_free_list_cor_err_cnt),
[C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rbuf_free_list_unc_err_cnt),
[C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_fsm_encoding_err_cnt),
[C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_flag_cor_err_cnt),
[C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_flag_unc_err_cnt),
[C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dc_sop_eop_parity_err_cnt),
[C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_csr_parity_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_qp_map_table_cor_err_cnt),
[C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_qp_map_table_unc_err_cnt),
[C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_data_cor_err_cnt),
[C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_data_unc_err_cnt),
[C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_hdr_cor_err_cnt),
[C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_rcv_hdr_unc_err_cnt),
[C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dc_intf_parity_err_cnt),
[C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_rx_dma_csr_cor_err_cnt),
/* SendPioErrStatus */
[C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pec_sop_head_parity_err_cnt),
[C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pcc_sop_head_parity_err_cnt),
[C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
			    0, 0, CNTR_NORMAL,
			    access_pio_last_returned_cnt_parity_err_cnt),
[C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
			    0, CNTR_NORMAL,
			    access_pio_current_free_cnt_parity_err_cnt),
[C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
			    CNTR_NORMAL,
			    access_pio_reserved_31_err_cnt),
[C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
			    CNTR_NORMAL,
			    access_pio_reserved_30_err_cnt),
[C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_ppmc_sop_len_err_cnt),
[C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_ppmc_bqc_mem_parity_err_cnt),
[C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_vl_fifo_parity_err_cnt),
[C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_vlf_sop_parity_err_cnt),
[C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_vlf_v1_len_parity_err_cnt),
[C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_block_qw_count_parity_err_cnt),
[C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_qw_valid_parity_err_cnt),
[C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_state_machine_err_cnt),
[C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_data_parity_err_cnt),
[C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_host_addr_mem_cor_err_cnt),
[C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_host_addr_mem_unc_err_cnt),
[C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
[C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_init_sm_in_err_cnt),
[C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_ppmc_pbl_fifo_err_cnt),
[C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
			    0, CNTR_NORMAL,
			    access_pio_credit_ret_fifo_parity_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank1_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank0_cor_err_cnt),
[C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank1_unc_err_cnt),
[C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_v1_len_mem_bank0_unc_err_cnt),
[C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sm_pkt_reset_parity_err_cnt),
[C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pkt_evict_fifo_parity_err_cnt),
[C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
			    "PioSbrdctrlCrrelFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
[C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sbrdctl_crrel_parity_err_cnt),
[C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pec_fifo_parity_err_cnt),
[C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_pcc_fifo_parity_err_cnt),
[C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sb_mem_fifo1_err_cnt),
[C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
			    CNTR_NORMAL,
			    access_pio_sb_mem_fifo0_err_cnt),
[C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_csr_parity_err_cnt),
[C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_addr_parity_err_cnt),
[C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_bad_ctxt_err_cnt),
/* SendDmaErrStatus */
[C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
			    0, CNTR_NORMAL,
			    access_sdma_pcie_req_tracking_cor_err_cnt),
[C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
			    0, CNTR_NORMAL,
			    access_sdma_pcie_req_tracking_unc_err_cnt),
[C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_csr_parity_err_cnt),
[C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_rpy_tag_err_cnt),
/* SendEgressErrStatus */
[C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_pio_memory_csr_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
			    0, CNTR_NORMAL,
			    access_tx_read_sdma_memory_csr_err_cnt),
[C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_egress_fifo_cor_err_cnt),
[C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_pio_memory_cor_err_cnt),
[C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_sdma_memory_cor_err_cnt),
[C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_sb_hdr_cor_err_cnt),
[C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_credit_overrun_err_cnt),
[C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo8_cor_err_cnt),
[C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo7_cor_err_cnt),
[C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo6_cor_err_cnt),
[C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo5_cor_err_cnt),
[C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo4_cor_err_cnt),
[C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo3_cor_err_cnt),
[C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo2_cor_err_cnt),
[C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo1_cor_err_cnt),
[C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_fifo0_cor_err_cnt),
[C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_credit_return_vl_err_cnt),
[C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_hcrc_insertion_err_cnt),
[C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_egress_fifo_unc_err_cnt),
[C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_pio_memory_unc_err_cnt),
[C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_read_sdma_memory_unc_err_cnt),
[C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_sb_hdr_unc_err_cnt),
[C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_credit_return_partiy_err_cnt),
[C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo8_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo7_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo6_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo5_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo4_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo3_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo2_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo1_unc_or_parity_err_cnt),
[C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_launch_fifo0_unc_or_parity_err_cnt),
[C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma15_disallowed_packet_err_cnt),
[C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma14_disallowed_packet_err_cnt),
[C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma13_disallowed_packet_err_cnt),
[C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma12_disallowed_packet_err_cnt),
[C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma11_disallowed_packet_err_cnt),
[C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma10_disallowed_packet_err_cnt),
[C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma9_disallowed_packet_err_cnt),
[C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma8_disallowed_packet_err_cnt),
[C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma7_disallowed_packet_err_cnt),
[C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma6_disallowed_packet_err_cnt),
[C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma5_disallowed_packet_err_cnt),
[C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma4_disallowed_packet_err_cnt),
[C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma3_disallowed_packet_err_cnt),
[C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma2_disallowed_packet_err_cnt),
[C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma1_disallowed_packet_err_cnt),
[C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma0_disallowed_packet_err_cnt),
[C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_config_parity_err_cnt),
[C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_sbrd_ctl_csr_parity_err_cnt),
[C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_launch_csr_parity_err_cnt),
[C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_illegal_vl_err_cnt),
[C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
			    "TxSbrdCtlStateMachineParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_sbrd_ctl_state_machine_parity_err_cnt),
[C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
			    CNTR_NORMAL,
			    access_egress_reserved_10_err_cnt),
[C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
			    CNTR_NORMAL,
			    access_egress_reserved_9_err_cnt),
[C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
			    0, 0, CNTR_NORMAL,
			    access_tx_sdma_launch_intf_parity_err_cnt),
[C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_pio_launch_intf_parity_err_cnt),
[C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
			    CNTR_NORMAL,
			    access_egress_reserved_6_err_cnt),
[C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_incorrect_link_state_err_cnt),
[C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_linkdown_err_cnt),
[C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
			    "EgressFifoUnderrunOrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_egress_fifi_underrun_or_parity_err_cnt),
[C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
			    CNTR_NORMAL,
			    access_egress_reserved_2_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_pkt_integrity_mem_unc_err_cnt),
[C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_tx_pkt_integrity_mem_cor_err_cnt),
/* SendErrStatus */
[C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_send_csr_write_bad_addr_err_cnt),
[C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
			    CNTR_NORMAL,
			    access_send_csr_read_bad_addr_err_cnt),
[C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
			    CNTR_NORMAL,
			    access_send_csr_parity_cnt),
/* SendCtxtErrStatus */
[C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_out_of_bounds_err_cnt),
[C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_write_overflow_err_cnt),
[C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
			    0, 0, CNTR_NORMAL,
			    access_pio_write_crosses_boundary_err_cnt),
[C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_disallowed_packet_err_cnt),
[C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
			    CNTR_NORMAL,
			    access_pio_inconsistent_sop_err_cnt),
/* SendDmaEngErrStatus */
[C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
			    0, 0, CNTR_NORMAL,
			    access_sdma_header_request_fifo_cor_err_cnt),
[C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_storage_cor_err_cnt),
[C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_packet_tracking_cor_err_cnt),
[C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_assembly_cor_err_cnt),
[C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_desc_table_cor_err_cnt),
[C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
			    0, 0, CNTR_NORMAL,
			    access_sdma_header_request_fifo_unc_err_cnt),
[C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_storage_unc_err_cnt),
[C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_packet_tracking_unc_err_cnt),
[C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_assembly_unc_err_cnt),
[C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_desc_table_unc_err_cnt),
[C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_timeout_err_cnt),
[C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_length_err_cnt),
[C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_address_err_cnt),
[C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_header_select_err_cnt),
[C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_reserved_9_err_cnt),
[C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_packet_desc_overflow_err_cnt),
[C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_length_mismatch_err_cnt),
[C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_halt_err_cnt),
[C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_mem_read_err_cnt),
[C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_first_desc_err_cnt),
[C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_tail_out_of_bounds_err_cnt),
[C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_too_long_err_cnt),
[C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_gen_mismatch_err_cnt),
[C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
			    CNTR_NORMAL,
			    access_sdma_wrong_dw_err_cnt),
};
static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
[C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
			CNTR_NORMAL),
[C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
			CNTR_NORMAL),
[C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
			CNTR_NORMAL),
[C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
			CNTR_NORMAL),
[C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
			CNTR_NORMAL),
[C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
			CNTR_NORMAL),
[C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
[C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
[C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
[C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
			CNTR_SYNTH | CNTR_VL),
[C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
[C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
[C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_dn_cnt),
[C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_link_up_cnt),
[C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
			access_sw_unknown_frame_cnt),
[C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
			access_sw_xmit_discards),
[C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
			CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
			access_sw_xmit_discards),
[C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
			access_xmit_constraint_errs),
[C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
			access_rcv_constraint_errs),
[C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
[C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
[C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
[C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
[C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
[C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
[C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
[C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
[C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
[C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
[C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
[C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
[C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_acks),
[C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_qacks),
[C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
			access_sw_cpu_rc_delayed_comp),
[OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
[OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
[OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
[OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
[OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
[OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
[OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
[OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
[OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
[OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
[OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
[OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
[OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
[OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
[OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
[OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
[OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
[OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
[OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
[OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
[OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
[OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
[OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
[OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
[OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
[OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
[OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
[OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
[OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
[OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
[OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
[OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
[OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
[OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
[OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
[OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
[OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
[OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
[OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
[OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
[OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
[OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
[OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
[OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
[OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
[OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
[OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
[OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
[OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
[OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
[OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
[OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
[OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
[OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
[OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
[OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
[OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
[OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
[OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
[OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
[OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
[OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
[OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
[OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
[OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
[OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
[OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
[OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
[OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
[OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
[OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
[OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
[OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
[OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
[OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
[OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
[OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
[OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
[OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
[OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
};
5210 /* ======================================================================== */
5212 /* return true if this is chip revision A */
5213 int is_ax(struct hfi1_devdata *dd)
5214 {
5215 u8 chip_rev_minor =
5216 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5217 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5218 return (chip_rev_minor & 0xf0) == 0;
5219 }
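/*
 * Illustrative example (stepping names assumed from the rev encoding):
 * a CCE_REVISION minor field of 0x03 denotes an A-step part (A3), so
 * is_ax() returns true; a minor field of 0x10 (B0) satisfies is_bx()
 * instead.
 */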
5221 /* return true if this is chip revision B */
5222 int is_bx(struct hfi1_devdata *dd)
5223 {
5224 u8 chip_rev_minor =
5225 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5226 & CCE_REVISION_CHIP_REV_MINOR_MASK;
5227 return (chip_rev_minor & 0xF0) == 0x10;
5228 }
5230 /* return true if the kernel urgent interrupt is masked for rcd */
5231 bool is_urg_masked(struct hfi1_ctxtdata *rcd)
5232 {
5233 u64 mask;
5234 u32 is = IS_RCVURGENT_START + rcd->ctxt;
5235 u8 bit = is % 64;
5236 mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5237 return !(mask & BIT_ULL(bit));
5238 }
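/*
 * Illustrative arithmetic (the IS_RCVURGENT_START value is assumed; see
 * the interrupt source map in the chip headers): sources are spread
 * across 64-bit CCE_INT_* CSRs, so source "is" lives in CSR word
 * is / 64 at bit is % 64. If IS_RCVURGENT_START were 416, context 3
 * would give is = 419, i.e. CSR CCE_INT_MASK + (8 * 6), bit 35.
 */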
5242 * Append string s to buffer buf. Arguments curp and len are the current
5243 * position and remaining length, respectively.
5245 * return 0 on success, 1 on out of room
5247 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5251 int result = 0; /* success */
5254 /* add a comma, if not the first string in the buffer */
5257 result = 1; /* out of room */
5264 /* copy the string */
5265 while ((c = *s++) != 0) {
5267 result = 1; /* out of room */
5275 /* write return values */
5283 * Using the given flag table, print a comma-separated string into
5284 * the buffer. End in '*' if the buffer is too short.
5286 static char *flag_string(char *buf, int buf_len, u64 flags,
5287 struct flag_table *table, int table_size)
5295 /* make sure there are at least 2 bytes so we can form "*" */
5299 len--; /* leave room for a nul */
5300 for (i = 0; i < table_size; i++) {
5301 if (flags & table[i].flag) {
5302 no_room = append_str(buf, &p, &len, table[i].str);
5305 flags &= ~table[i].flag;
5309 /* any undocumented bits left? */
5310 if (!no_room && flags) {
5311 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5312 no_room = append_str(buf, &p, &len, extra);
5315 /* add '*' if we ran out of room */
5317 /* may need to back up to add space for a '*' */
5323 /* add final nul - space already allocated above */
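/*
 * Example output (hypothetical flag table): with entries {0x1, "FooErr"}
 * and {0x4, "BarErr"}, flags == 0x15 renders as "FooErr,BarErr,bits 0x10",
 * the trailing item covering the undocumented bit. A too-short buffer
 * ends in '*' instead.
 */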
5328 /* first 8 CCE error interrupt source names */
5329 static const char * const cce_misc_names[] = {
5330 "CceErrInt", /* 0 */
5331 "RxeErrInt", /* 1 */
5332 "MiscErrInt", /* 2 */
5333 "Reserved3", /* 3 */
5334 "PioErrInt", /* 4 */
5335 "SDmaErrInt", /* 5 */
5336 "EgressErrInt", /* 6 */
5341 * Return the miscellaneous error interrupt name.
5343 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5345 if (source < ARRAY_SIZE(cce_misc_names))
5346 strncpy(buf, cce_misc_names[source], bsize);
5347 else
5348 snprintf(buf, bsize, "Reserved%u",
5349 source + IS_GENERAL_ERR_START);
5355 * Return the SDMA engine error interrupt name.
5357 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5359 snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5364 * Return the send context error interrupt name.
5366 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5368 snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5372 static const char * const various_names[] = {
5373 "PbcInt",
5374 "GpioAssertInt",
5375 "Qsfp1Int",
5376 "Qsfp2Int",
5377 "TCritInt"
5378 };
5381 * Return the various interrupt name.
5383 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5385 if (source < ARRAY_SIZE(various_names))
5386 strncpy(buf, various_names[source], bsize);
5387 else
5388 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5393 * Return the DC interrupt name.
5395 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5397 static const char * const dc_int_names[] = {
5398 "common",
5399 "lcb",
5400 "8051",
5401 "lbm" /* local block merge */
5402 };
5404 if (source < ARRAY_SIZE(dc_int_names))
5405 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5407 snprintf(buf, bsize, "DCInt%u", source);
5411 static const char * const sdma_int_names[] = {
5412 "SDmaInt",
5413 "SDmaIdleInt",
5414 "SDmaProgressInt",
5415 };
5418 * Return the SDMA engine interrupt name.
5420 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5422 /* what interrupt */
5423 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
5425 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5427 if (likely(what < 3))
5428 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5430 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5435 * Return the receive available interrupt name.
5437 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5439 snprintf(buf, bsize, "RcvAvailInt%u", source);
5444 * Return the receive urgent interrupt name.
5446 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5448 snprintf(buf, bsize, "RcvUrgentInt%u", source);
5453 * Return the send credit interrupt name.
5455 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5457 snprintf(buf, bsize, "SendCreditInt%u", source);
5462 * Return the reserved interrupt name.
5464 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5466 snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5470 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5472 return flag_string(buf, buf_len, flags,
5473 cce_err_status_flags,
5474 ARRAY_SIZE(cce_err_status_flags));
5477 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5479 return flag_string(buf, buf_len, flags,
5480 rxe_err_status_flags,
5481 ARRAY_SIZE(rxe_err_status_flags));
5484 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5486 return flag_string(buf, buf_len, flags, misc_err_status_flags,
5487 ARRAY_SIZE(misc_err_status_flags));
5490 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5492 return flag_string(buf, buf_len, flags,
5493 pio_err_status_flags,
5494 ARRAY_SIZE(pio_err_status_flags));
5497 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5499 return flag_string(buf, buf_len, flags,
5500 sdma_err_status_flags,
5501 ARRAY_SIZE(sdma_err_status_flags));
5504 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5506 return flag_string(buf, buf_len, flags,
5507 egress_err_status_flags,
5508 ARRAY_SIZE(egress_err_status_flags));
5511 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5513 return flag_string(buf, buf_len, flags,
5514 egress_err_info_flags,
5515 ARRAY_SIZE(egress_err_info_flags));
5518 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5520 return flag_string(buf, buf_len, flags,
5521 send_err_status_flags,
5522 ARRAY_SIZE(send_err_status_flags));
5525 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5531 * For most of these errors, there is nothing that can be done except
5532 * report or record it.
5534 dd_dev_info(dd, "CCE Error: %s\n",
5535 cce_err_status_string(buf, sizeof(buf), reg));
5537 if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5538 is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5539 /* this error requires a manual drop into SPC freeze mode */
5541 start_freeze_handling(dd->pport, FREEZE_SELF);
5544 for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5545 if (reg & (1ull << i)) {
5546 incr_cntr64(&dd->cce_err_status_cnt[i]);
5547 /* maintain a counter over all cce_err_status errors */
5548 incr_cntr64(&dd->sw_cce_err_status_aggregate);
5554 * Check counters for receive errors that do not have an interrupt
5555 * associated with them.
5557 #define RCVERR_CHECK_TIME 10
5558 static void update_rcverr_timer(struct timer_list *t)
5560 struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5561 struct hfi1_pportdata *ppd = dd->pport;
5562 u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5564 if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5565 ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5566 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5567 set_link_down_reason(
5568 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5569 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5570 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5572 dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5574 mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
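/*
 * Timing example: HZ * RCVERR_CHECK_TIME re-arms this timer every 10
 * seconds. A link bounce is requested only when C_RCV_OVF advanced
 * during the interval and the FM enabled OPA_PI_MASK_EX_BUFFER_OVERRUN
 * in port_error_action.
 */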
5577 static int init_rcverr(struct hfi1_devdata *dd)
5579 timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5580 /* Assume the hardware counter has been reset */
5581 dd->rcv_ovfl_cnt = 0;
5582 return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5585 static void free_rcverr(struct hfi1_devdata *dd)
5587 if (dd->rcverr_timer.function)
5588 del_timer_sync(&dd->rcverr_timer);
5591 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5596 dd_dev_info(dd, "Receive Error: %s\n",
5597 rxe_err_status_string(buf, sizeof(buf), reg));
5599 if (reg & ALL_RXE_FREEZE_ERR) {
5603 * Freeze mode recovery is disabled for the errors
5604 * in RXE_FREEZE_ABORT_MASK
5606 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5607 flags = FREEZE_ABORT;
5609 start_freeze_handling(dd->pport, flags);
5612 for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5613 if (reg & (1ull << i))
5614 incr_cntr64(&dd->rcv_err_status_cnt[i]);
5618 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5623 dd_dev_info(dd, "Misc Error: %s",
5624 misc_err_status_string(buf, sizeof(buf), reg));
5625 for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5626 if (reg & (1ull << i))
5627 incr_cntr64(&dd->misc_err_status_cnt[i]);
5631 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5636 dd_dev_info(dd, "PIO Error: %s\n",
5637 pio_err_status_string(buf, sizeof(buf), reg));
5639 if (reg & ALL_PIO_FREEZE_ERR)
5640 start_freeze_handling(dd->pport, 0);
5642 for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5643 if (reg & (1ull << i))
5644 incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5648 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5653 dd_dev_info(dd, "SDMA Error: %s\n",
5654 sdma_err_status_string(buf, sizeof(buf), reg));
5656 if (reg & ALL_SDMA_FREEZE_ERR)
5657 start_freeze_handling(dd->pport, 0);
5659 for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5660 if (reg & (1ull << i))
5661 incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5665 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5667 incr_cntr64(&ppd->port_xmit_discards);
5670 static void count_port_inactive(struct hfi1_devdata *dd)
5672 __count_port_discards(dd->pport);
5676 * We have had a "disallowed packet" error during egress. Determine the
5677 * integrity check which failed, and update the relevant error counter, etc.
5679 * Note that the SEND_EGRESS_ERR_INFO register has only a single
5680 * bit of state per integrity check, and so we can miss the reason for an
5681 * egress error if more than one packet fails the same integrity check
5682 * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5684 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5687 struct hfi1_pportdata *ppd = dd->pport;
5688 u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5689 u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5692 /* clear down all observed info as quickly as possible after read */
5693 write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5696 "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5697 info, egress_err_info_string(buf, sizeof(buf), info), src);
5699 /* Eventually add other counters for each bit */
5700 if (info & PORT_DISCARD_EGRESS_ERRS) {
5704 * Count all applicable bits as individual errors and
5705 * attribute them to the packet that triggered this handler.
5706 * This may not be completely accurate due to limitations
5707 * on the available hardware error information. There is
5708 * a single information register and any number of error
5709 * packets may have occurred and contributed to it before
5710 * this routine is called. This means that:
5711 * a) If multiple packets with the same error occur before
5712 * this routine is called, earlier packets are missed.
5713 * There is only a single bit for each error type.
5714 * b) Errors may not be attributed to the correct VL.
5715 * The driver is attributing all bits in the info register
5716 * to the packet that triggered this call, but bits
5717 * could be an accumulation of different packets with
5718 * different VLs.
5719 * c) A single error packet may have multiple counts attached
5720 * to it. There is no way for the driver to know if
5721 * multiple bits set in the info register are due to a
5722 * single packet or multiple packets. The driver assumes
5723 * multiple packets.
5725 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5726 for (i = 0; i < weight; i++) {
5727 __count_port_discards(ppd);
5728 if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5729 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5731 incr_cntr64(&ppd->port_xmit_discards_vl
5738 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5739 * register. Does it represent a 'port inactive' error?
5741 static inline int port_inactive_err(u64 posn)
5743 return (posn >= SEES(TX_LINKDOWN) &&
5744 posn <= SEES(TX_INCORRECT_LINK_STATE));
5748 * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5749 * register. Does it represent a 'disallowed packet' error?
5751 static inline int disallowed_pkt_err(int posn)
5753 return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5754 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5758 * Input value is a bit position of one of the SDMA engine disallowed
5759 * packet errors. Return which engine. Use of this must be guarded by
5760 * disallowed_pkt_err().
5762 static inline int disallowed_pkt_engine(int posn)
5764 return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
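/*
 * Example: posn == SEES(TX_SDMA3_DISALLOWED_PACKET) yields engine 3,
 * since the disallowed-packet status bits are contiguous starting at
 * SEES(TX_SDMA0_DISALLOWED_PACKET).
 */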
5768 * Translate an SDMA engine to a VL. Return -1 if the translation cannot
5769 * be done.
5771 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5773 struct sdma_vl_map *m;
5777 if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5781 m = rcu_dereference(dd->sdma_map);
5782 vl = m->engine_to_vl[engine];
5789 * Translate the send context (software index) into a VL. Return -1 if the
5790 * translation cannot be done.
5792 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5794 struct send_context_info *sci;
5795 struct send_context *sc;
5798 sci = &dd->send_contexts[sw_index];
5800 /* there is no information for user (PSM) and ack contexts */
5801 if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5807 if (dd->vld[15].sc == sc)
5809 for (i = 0; i < num_vls; i++)
5810 if (dd->vld[i].sc == sc)
5816 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5818 u64 reg_copy = reg, handled = 0;
5822 if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5823 start_freeze_handling(dd->pport, 0);
5824 else if (is_ax(dd) &&
5825 (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5826 (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5827 start_freeze_handling(dd->pport, 0);
5830 int posn = fls64(reg_copy);
5831 /* fls64() returns a 1-based offset, we want it zero based */
5832 int shift = posn - 1;
5833 u64 mask = 1ULL << shift;
5835 if (port_inactive_err(shift)) {
5836 count_port_inactive(dd);
5838 } else if (disallowed_pkt_err(shift)) {
5839 int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5841 handle_send_egress_err_info(dd, vl);
5850 dd_dev_info(dd, "Egress Error: %s\n",
5851 egress_err_status_string(buf, sizeof(buf), reg));
5853 for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5854 if (reg & (1ull << i))
5855 incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5859 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5864 dd_dev_info(dd, "Send Error: %s\n",
5865 send_err_status_string(buf, sizeof(buf), reg));
5867 for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5868 if (reg & (1ull << i))
5869 incr_cntr64(&dd->send_err_status_cnt[i]);
5874 * The maximum number of times the error clear down will loop before
5875 * blocking a repeating error. This value is arbitrary.
5877 #define MAX_CLEAR_COUNT 20
5880 * Clear and handle an error register. All error interrupts are funneled
5881 * through here to have a central location to correctly handle single-
5882 * or multi-shot errors.
5884 * For non per-context registers, call this routine with a context value
5885 * of 0 so the per-context offset is zero.
5887 * If the handler loops too many times, assume that something is wrong
5888 * and can't be fixed, so mask the error bits.
5890 static void interrupt_clear_down(struct hfi1_devdata *dd,
5892 const struct err_reg_info *eri)
5897 /* read in a loop until no more errors are seen */
5900 reg = read_kctxt_csr(dd, context, eri->status);
5903 write_kctxt_csr(dd, context, eri->clear, reg);
5904 if (likely(eri->handler))
5905 eri->handler(dd, context, reg);
5907 if (count > MAX_CLEAR_COUNT) {
5910 dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5913 * Read-modify-write so any other masked bits
5914 * are not affected.
5916 mask = read_kctxt_csr(dd, context, eri->mask);
5918 write_kctxt_csr(dd, context, eri->mask, mask);
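/*
 * Rationale sketch: a multi-shot error can re-assert between the status
 * read and the clear write, so the loop re-reads until the status is
 * quiet. After MAX_CLEAR_COUNT (20) passes the bits are assumed stuck
 * and are disabled via the read-modify-write of eri->mask above.
 */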
5925 * CCE block "misc" interrupt. Source is < 16.
5927 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5929 const struct err_reg_info *eri = &misc_errs[source];
5932 interrupt_clear_down(dd, 0, eri);
5934 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5939 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5941 return flag_string(buf, buf_len, flags,
5942 sc_err_status_flags,
5943 ARRAY_SIZE(sc_err_status_flags));
5947 * Send context error interrupt. Source (hw_context) is < 160.
5949 * All send context errors cause the send context to halt. The normal
5950 * clear-down mechanism cannot be used because we cannot clear the
5951 * error bits until several other long-running items are done first.
5952 * This is OK because with the context halted, nothing else is going
5953 * to happen on it anyway.
5955 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5956 unsigned int hw_context)
5958 struct send_context_info *sci;
5959 struct send_context *sc;
5964 unsigned long irq_flags;
5966 sw_index = dd->hw_to_sw[hw_context];
5967 if (sw_index >= dd->num_send_contexts) {
5969 "out of range sw index %u for send context %u\n",
5970 sw_index, hw_context);
5973 sci = &dd->send_contexts[sw_index];
5974 spin_lock_irqsave(&dd->sc_lock, irq_flags);
5977 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5978 sw_index, hw_context);
5979 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5983 /* tell the software that a halt has begun */
5984 sc_stop(sc, SCF_HALTED);
5986 status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5988 dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5989 send_context_err_status_string(flags, sizeof(flags),
5992 if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5993 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5996 * Automatically restart halted kernel contexts out of interrupt
5997 * context. User contexts must ask the driver to restart the context.
5999 if (sc->type != SC_USER)
6000 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
6001 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
6004 * Update the counters for the corresponding status bits.
6005 * Note that these particular counters are aggregated over all
6008 for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
6009 if (status & (1ull << i))
6010 incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6014 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6015 unsigned int source, u64 status)
6017 struct sdma_engine *sde;
6020 sde = &dd->per_sdma[source];
6021 #ifdef CONFIG_SDMA_VERBOSITY
6022 dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6023 slashstrip(__FILE__), __LINE__, __func__);
6024 dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6025 sde->this_idx, source, (unsigned long long)status);
6028 sdma_engine_error(sde, status);
6031 * Update the counters for the corresponding status bits.
6032 * Note that these particular counters are aggregated over
6033 * all 16 DMA engines.
6035 for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6036 if (status & (1ull << i))
6037 incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6042 * CCE block SDMA error interrupt. Source is < 16.
6044 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6046 #ifdef CONFIG_SDMA_VERBOSITY
6047 struct sdma_engine *sde = &dd->per_sdma[source];
6049 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6050 slashstrip(__FILE__), __LINE__, __func__);
6051 dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6053 sdma_dumpstate(sde);
6055 interrupt_clear_down(dd, source, &sdma_eng_err);
6059 * CCE block "various" interrupt. Source is < 8.
6061 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6063 const struct err_reg_info *eri = &various_err[source];
6066 * TCritInt cannot go through interrupt_clear_down()
6067 * because it is not a second tier interrupt. The handler
6068 * should be called directly.
6070 if (source == TCRIT_INT_SOURCE)
6071 handle_temp_err(dd);
6072 else if (eri->handler)
6073 interrupt_clear_down(dd, 0, eri);
6076 "%s: Unimplemented/reserved interrupt %d\n",
6080 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6082 /* src_ctx is always zero */
6083 struct hfi1_pportdata *ppd = dd->pport;
6084 unsigned long flags;
6085 u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6087 if (reg & QSFP_HFI0_MODPRST_N) {
6088 if (!qsfp_mod_present(ppd)) {
6089 dd_dev_info(dd, "%s: QSFP module removed\n",
6092 ppd->driver_link_ready = 0;
6094 * Cable removed, reset all our information about the
6095 * cache and cable capabilities
6098 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6100 * We don't set cache_refresh_required here as we expect
6101 * an interrupt when a cable is inserted
6103 ppd->qsfp_info.cache_valid = 0;
6104 ppd->qsfp_info.reset_needed = 0;
6105 ppd->qsfp_info.limiting_active = 0;
6106 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6108 /* Invert the ModPresent pin now to detect plug-in */
6109 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6110 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6112 if ((ppd->offline_disabled_reason >
6114 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6115 (ppd->offline_disabled_reason ==
6116 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6117 ppd->offline_disabled_reason =
6119 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6121 if (ppd->host_link_state == HLS_DN_POLL) {
6123 * The link is still in POLL. This means
6124 * that the normal link down processing
6125 * will not happen. We have to do it here
6126 * before turning the DC off.
6128 queue_work(ppd->link_wq, &ppd->link_down_work);
6131 dd_dev_info(dd, "%s: QSFP module inserted\n",
6134 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6135 ppd->qsfp_info.cache_valid = 0;
6136 ppd->qsfp_info.cache_refresh_required = 1;
6137 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6141 * Stop inversion of ModPresent pin to detect
6142 * removal of the cable
6144 qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6145 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6146 ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6148 ppd->offline_disabled_reason =
6149 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6153 if (reg & QSFP_HFI0_INT_N) {
6154 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6156 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6157 ppd->qsfp_info.check_interrupt_flags = 1;
6158 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6161 /* Schedule the QSFP work only if there is a cable attached. */
6162 if (qsfp_mod_present(ppd))
6163 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6166 static int request_host_lcb_access(struct hfi1_devdata *dd)
6170 ret = do_8051_command(dd, HCMD_MISC,
6171 (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6172 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6173 if (ret != HCMD_SUCCESS) {
6174 dd_dev_err(dd, "%s: command failed with error %d\n",
6177 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6180 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6184 ret = do_8051_command(dd, HCMD_MISC,
6185 (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6186 LOAD_DATA_FIELD_ID_SHIFT, NULL);
6187 if (ret != HCMD_SUCCESS) {
6188 dd_dev_err(dd, "%s: command failed with error %d\n",
6191 return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6195 * Set the LCB selector - allow host access. The DCC selector always
6196 * points to the host.
6198 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6200 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6201 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6202 DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6206 * Clear the LCB selector - allow 8051 access. The DCC selector always
6207 * points to the host.
6209 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6211 write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6212 DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6216 * Acquire LCB access from the 8051. If the host already has access,
6217 * just increment a counter. Otherwise, inform the 8051 that the
6218 * host is taking access.
6222 * -EBUSY if the 8051 has control and cannot be disturbed
6223 * -errno if unable to acquire access from the 8051
6225 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6227 struct hfi1_pportdata *ppd = dd->pport;
6231 * Use the host link state lock so the operation of this routine
6232 * { link state check, selector change, count increment } can occur
6233 * as a unit against a link state change. Otherwise there is a
6234 * race between the state change and the count increment.
6237 mutex_lock(&ppd->hls_lock);
6239 while (!mutex_trylock(&ppd->hls_lock))
6243 /* this access is valid only when the link is up */
6244 if (ppd->host_link_state & HLS_DOWN) {
6245 dd_dev_info(dd, "%s: link state %s not up\n",
6246 __func__, link_state_name(ppd->host_link_state));
6251 if (dd->lcb_access_count == 0) {
6252 ret = request_host_lcb_access(dd);
6255 "%s: unable to acquire LCB access, err %d\n",
6259 set_host_lcb_access(dd);
6261 dd->lcb_access_count++;
6263 mutex_unlock(&ppd->hls_lock);
6268 * Release LCB access by decrementing the use count. If the count is moving
6269 * from 1 to 0, inform 8051 that it has control back.
6273 * -errno if unable to release access to the 8051
6275 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6280 * Use the host link state lock because the acquire needed it.
6281 * Here, we only need to keep { selector change, count decrement }
6282 * in step.
6285 mutex_lock(&dd->pport->hls_lock);
6287 while (!mutex_trylock(&dd->pport->hls_lock))
6291 if (dd->lcb_access_count == 0) {
6292 dd_dev_err(dd, "%s: LCB access count is zero. Skipping.\n",
6297 if (dd->lcb_access_count == 1) {
6298 set_8051_lcb_access(dd);
6299 ret = request_8051_lcb_access(dd);
6302 "%s: unable to release LCB access, err %d\n",
6304 /* restore host access if the grant didn't work */
6305 set_host_lcb_access(dd);
6309 dd->lcb_access_count--;
6311 mutex_unlock(&dd->pport->hls_lock);
6316 * Initialize LCB access variables and state. Called during driver load,
6317 * after most of the initialization is finished.
6319 * The DC default is LCB access on for the host. The driver defaults to
6320 * leaving access to the 8051. Assign access now - this constrains the call
6321 * to this routine to be after all LCB set-up is done. In particular, after
6322 * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6324 static void init_lcb_access(struct hfi1_devdata *dd)
6326 dd->lcb_access_count = 0;
6330 * Write a response back to a 8051 request.
6332 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6334 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6335 DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6337 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6338 (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
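/*
 * Example: hreq_response(dd, HREQ_SUCCESS, 0) packs COMPLETED = 1, the
 * return code, and zero response data into one 64-bit CSR write, which
 * is the whole host-to-8051 response handshake.
 */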
6342 * Handle host requests from the 8051.
6344 static void handle_8051_request(struct hfi1_pportdata *ppd)
6346 struct hfi1_devdata *dd = ppd->dd;
6351 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6352 if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6353 return; /* no request */
6355 /* zero out COMPLETED so the response is seen */
6356 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6358 /* extract request details */
6359 type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6360 & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6361 data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6362 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6365 case HREQ_LOAD_CONFIG:
6366 case HREQ_SAVE_CONFIG:
6367 case HREQ_READ_CONFIG:
6368 case HREQ_SET_TX_EQ_ABS:
6369 case HREQ_SET_TX_EQ_REL:
6371 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6373 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6375 case HREQ_LCB_RESET:
6376 /* Put the LCB, RX FPE and TX FPE into reset */
6377 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6378 /* Make sure the write completed */
6379 (void)read_csr(dd, DCC_CFG_RESET);
6380 /* Hold the reset long enough to take effect */
6381 udelay(1);
6382 /* Take the LCB, RX FPE and TX FPE out of reset */
6383 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6384 hreq_response(dd, HREQ_SUCCESS, 0);
6387 case HREQ_CONFIG_DONE:
6388 hreq_response(dd, HREQ_SUCCESS, 0);
6391 case HREQ_INTERFACE_TEST:
6392 hreq_response(dd, HREQ_SUCCESS, data);
6395 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6396 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6402 * Set up the allocation unit value.
6404 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6406 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6408 /* do not modify other values in the register */
6409 reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6410 reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6411 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6415 * Set up initial VL15 credits of the remote. Assumes the rest of
6416 * the CM credit registers are zero from a previous global or credit reset.
6417 * Shared limit for VL15 will always be 0.
6419 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6421 u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6423 /* set initial values for total and shared credit limit */
6424 reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6425 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6428 * Set total limit to be equal to VL15 credits.
6429 * Leave shared limit at 0.
6431 reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6432 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6434 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6435 << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
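/*
 * Example: set_up_vl15(dd, 0x11) programs a total credit limit of 0x11,
 * leaves the shared limit at 0 and dedicates all 0x11 credits to VL15,
 * matching the two register writes above.
 */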
6439 * Zero all credit details from the previous connection and
6440 * reset the CM manager's internal counters.
6442 void reset_link_credits(struct hfi1_devdata *dd)
6446 /* remove all previous VL credit limits */
6447 for (i = 0; i < TXE_NUM_DATA_VL; i++)
6448 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6449 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6450 write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6451 /* reset the CM block */
6452 pio_send_control(dd, PSC_CM_RESET);
6453 /* reset cached value */
6454 dd->vl15buf_cached = 0;
6457 /* convert a vCU to a CU */
6458 static u32 vcu_to_cu(u8 vcu)
6459 {
6460 return 1 << vcu;
6461 }
6463 /* convert a CU to a vCU */
6464 static u8 cu_to_vcu(u32 cu)
6465 {
6466 return ilog2(cu);
6467 }
6469 /* convert a vAU to an AU */
6470 static u32 vau_to_au(u8 vau)
6471 {
6472 return 8 * (1 << vau);
6473 }
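/*
 * Worked arithmetic: vAU is a power-of-2 exponent on an 8-byte base, so
 * vau_to_au(0) == 8, vau_to_au(1) == 16 and vau_to_au(2) == 32 bytes.
 * vCU/CU convert the same way (per the helpers above) without the
 * 8-byte scaling.
 */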
6475 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6477 ppd->sm_trap_qp = 0x0;
6482 * Graceful LCB shutdown. This leaves the LCB FIFOs in reset.
6484 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6488 /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6489 write_csr(dd, DC_LCB_CFG_RUN, 0);
6490 /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6491 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6492 1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6493 /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6494 dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6495 reg = read_csr(dd, DCC_CFG_RESET);
6496 write_csr(dd, DCC_CFG_RESET, reg |
6497 DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6498 (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6500 udelay(1); /* must hold for the longer of 16cclks or 20ns */
6501 write_csr(dd, DCC_CFG_RESET, reg);
6502 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6507 * This routine should be called after the link has been transitioned to
6508 * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6509 * reset).
6511 * The expectation is that the caller of this routine would have taken
6512 * care of properly transitioning the link into the correct state.
6513 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6514 * before calling this function.
6516 static void _dc_shutdown(struct hfi1_devdata *dd)
6518 lockdep_assert_held(&dd->dc8051_lock);
6520 if (dd->dc_shutdown)
6523 dd->dc_shutdown = 1;
6524 /* Shutdown the LCB */
6525 lcb_shutdown(dd, 1);
6527 * Going to OFFLINE would have caused the 8051 to put the
6528 * SerDes into reset already. Just need to shut down the 8051,
6531 write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6534 static void dc_shutdown(struct hfi1_devdata *dd)
6536 mutex_lock(&dd->dc8051_lock);
6538 mutex_unlock(&dd->dc8051_lock);
6542 * Calling this after the DC has been brought out of reset should not
6543 * do anything, as the 8051 is already running.
6544 * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6545 * before calling this function.
6547 static void _dc_start(struct hfi1_devdata *dd)
6549 lockdep_assert_held(&dd->dc8051_lock);
6551 if (!dd->dc_shutdown)
6554 /* Take the 8051 out of reset */
6555 write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6556 /* Wait until 8051 is ready */
6557 if (wait_fm_ready(dd, TIMEOUT_8051_START))
6558 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6561 /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6562 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6563 /* lcb_shutdown() with abort=1 does not restore these */
6564 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6565 dd->dc_shutdown = 0;
6568 static void dc_start(struct hfi1_devdata *dd)
6570 mutex_lock(&dd->dc8051_lock);
6572 mutex_unlock(&dd->dc8051_lock);
6576 * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6578 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6580 u64 rx_radr, tx_radr;
6583 if (dd->icode != ICODE_FPGA_EMULATION)
6587 * These LCB defaults on emulator _s are good, nothing to do here:
6588 * LCB_CFG_TX_FIFOS_RADR
6589 * LCB_CFG_RX_FIFOS_RADR
6591 * LCB_CFG_IGNORE_LOST_RCLK
6593 if (is_emulator_s(dd))
6595 /* else this is _p */
6597 version = emulator_rev(dd);
6599 version = 0x2d; /* all B0 use 0x2d or higher settings */
6601 if (version <= 0x12) {
6602 /* release 0x12 and below */
6605 * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6606 * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6607 * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6610 0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6611 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6612 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6614 * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6615 * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6617 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6618 } else if (version <= 0x18) {
6619 /* release 0x13 up to 0x18 */
6620 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6622 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6623 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6624 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6625 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6626 } else if (version == 0x19) {
6628 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6630 0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6631 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6632 | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6633 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6634 } else if (version == 0x1a) {
6636 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6638 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6639 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6640 | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6641 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6642 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6644 /* release 0x1b and higher */
6645 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6647 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6648 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6649 | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6650 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6653 write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6654 /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6655 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6656 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6657 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6661 * Handle a SMA idle message
6663 * This is a work-queue function outside of the interrupt.
6665 void handle_sma_message(struct work_struct *work)
6667 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6669 struct hfi1_devdata *dd = ppd->dd;
6674 * msg is bytes 1-4 of the 40-bit idle message - the command code
6677 ret = read_idle_sma(dd, &msg);
6680 dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6682 * React to the SMA message. Byte[1] (0 for us) is the command.
6684 switch (msg & 0xff) {
6685 case SMA_IDLE_ARM:
6686 /*
6687 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6690 * Only expected in INIT or ARMED, discard otherwise.
6692 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6693 ppd->neighbor_normal = 1;
6694 break;
6695 case SMA_IDLE_ACTIVE:
6697 * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6700 * Can activate the node. Discard otherwise.
6702 if (ppd->host_link_state == HLS_UP_ARMED &&
6703 ppd->is_active_optimize_enabled) {
6704 ppd->neighbor_normal = 1;
6705 ret = set_link_state(ppd, HLS_UP_ACTIVE);
6709 "%s: received Active SMA idle message, couldn't set link to Active\n",
6715 "%s: received unexpected SMA idle message 0x%llx\n",
6721 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6724 unsigned long flags;
6726 spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6727 rcvctrl = read_csr(dd, RCV_CTRL);
6730 write_csr(dd, RCV_CTRL, rcvctrl);
6731 spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6734 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6736 adjust_rcvctrl(dd, add, 0);
6739 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6741 adjust_rcvctrl(dd, 0, clear);
6745 * Called from all interrupt handlers to start handling an SPC freeze.
6747 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6749 struct hfi1_devdata *dd = ppd->dd;
6750 struct send_context *sc;
6754 if (flags & FREEZE_SELF)
6755 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6757 /* enter frozen mode */
6758 dd->flags |= HFI1_FROZEN;
6760 /* notify all SDMA engines that they are going into a freeze */
6761 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6763 sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6765 /* do halt pre-handling on all enabled send contexts */
6766 for (i = 0; i < dd->num_send_contexts; i++) {
6767 sc = dd->send_contexts[i].sc;
6768 if (sc && (sc->flags & SCF_ENABLED))
6769 sc_stop(sc, sc_flags);
6772 /* Send contexts are frozen. Notify user space */
6773 hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6775 if (flags & FREEZE_ABORT) {
6777 "Aborted freeze recovery. Please REBOOT system\n");
6780 /* queue non-interrupt handler */
6781 queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6785 * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6786 * depending on the "freeze" parameter.
6788 * No need to return an error if it times out, our only option
6789 * is to proceed anyway.
6791 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6793 unsigned long timeout;
6796 timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6798 reg = read_csr(dd, CCE_STATUS);
6800 /* waiting until all indicators are set */
6801 if ((reg & ALL_FROZE) == ALL_FROZE)
6802 return; /* all done */
6804 /* waiting until all indicators are clear */
6805 if ((reg & ALL_FROZE) == 0)
6806 return; /* all done */
6809 if (time_after(jiffies, timeout)) {
6811 "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6812 freeze ? "" : "un", reg & ALL_FROZE,
6813 freeze ? ALL_FROZE : 0ull);
6816 usleep_range(80, 120);
6821 * Do all freeze handling for the RXE block.
6823 static void rxe_freeze(struct hfi1_devdata *dd)
6826 struct hfi1_ctxtdata *rcd;
6829 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6831 /* disable all receive contexts */
6832 for (i = 0; i < dd->num_rcv_contexts; i++) {
6833 rcd = hfi1_rcd_get_by_index(dd, i);
6834 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6840 * Unfreeze handling for the RXE block - kernel contexts only.
6841 * This will also enable the port. User contexts will do unfreeze
6842 * handling on a per-context basis as they call into the driver.
6845 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6849 struct hfi1_ctxtdata *rcd;
6851 /* enable all kernel contexts */
6852 for (i = 0; i < dd->num_rcv_contexts; i++) {
6853 rcd = hfi1_rcd_get_by_index(dd, i);
6855 /* Ensure all non-user contexts (including vnic) are enabled */
6857 (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6861 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6862 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6863 rcvmask |= rcd->rcvhdrtail_kvaddr ?
6864 HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6865 hfi1_rcvctrl(dd, rcvmask, rcd);
6870 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6874 * Non-interrupt SPC freeze handling.
6876 * This is a work-queue function outside of the triggering interrupt.
6878 void handle_freeze(struct work_struct *work)
6880 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6882 struct hfi1_devdata *dd = ppd->dd;
6884 /* wait for freeze indicators on all affected blocks */
6885 wait_for_freeze_status(dd, 1);
6887 /* SPC is now frozen */
6889 /* do send PIO freeze steps */
6892 /* do send DMA freeze steps */
6895 /* do send egress freeze steps - nothing to do */
6897 /* do receive freeze steps */
6901 * Unfreeze the hardware - clear the freeze, wait for each
6902 * block's frozen bit to clear, then clear the frozen flag.
6904 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6905 wait_for_freeze_status(dd, 0);
6907 if (is_ax(dd)) {
6908 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6909 wait_for_freeze_status(dd, 1);
6910 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6911 wait_for_freeze_status(dd, 0);
6912 }
6914 /* do send PIO unfreeze steps for kernel contexts */
6915 pio_kernel_unfreeze(dd);
6917 /* do send DMA unfreeze steps */
6920 /* do send egress unfreeze steps - nothing to do */
6922 /* do receive unfreeze steps for kernel contexts */
6923 rxe_kernel_unfreeze(dd);
6926 * The unfreeze procedure touches global device registers when
6927 * it disables and re-enables RXE. Mark the device unfrozen
6928 * after all that is done so other parts of the driver waiting
6929 * for the device to unfreeze don't do things out of order.
6931 * The above implies that the meaning of HFI1_FROZEN flag is
6932 * "Device has gone into freeze mode and freeze mode handling
6933 * is still in progress."
6935 * The flag will be removed when freeze mode processing has
6938 dd->flags &= ~HFI1_FROZEN;
6939 wake_up(&dd->event_queue);
6941 /* no longer frozen */
6945 * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6947 * @ppd: info of the physical HFI port
6948 * @link_width: new link width after link up or downgrade
6950 * Update the PortXmitWait and PortVlXmitWait counters after
6951 * a link up or downgrade event to reflect a link width change.
6953 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6959 tx_width = tx_link_width(link_width);
6960 link_speed = get_link_speed(ppd->link_speed_active);
6963 * There are C_VL_COUNT PortVLXmitWait counters.
6964 * One is added to C_VL_COUNT to include the PortXmitWait counter.
6966 for (i = 0; i < C_VL_COUNT + 1; i++)
6967 get_xmit_wait_counters(ppd, tx_width, link_speed, i);
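/*
 * Example: the loop indexes i = 0..C_VL_COUNT; every pass but the last
 * refreshes one PortVLXmitWait counter, and the final pass
 * (i == C_VL_COUNT) refreshes the port-wide PortXmitWait aggregate.
 */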
6971 * Handle a link up interrupt from the 8051.
6973 * This is a work-queue function outside of the interrupt.
6975 void handle_link_up(struct work_struct *work)
6977 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6979 struct hfi1_devdata *dd = ppd->dd;
6981 set_link_state(ppd, HLS_UP_INIT);
6983 /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6984 read_ltp_rtt(dd);
6986 * OPA specifies that certain counters are cleared on a transition
6987 * to link up, so do that.
6989 clear_linkup_counters(dd);
6991 * And (re)set link up default values.
6993 set_linkup_defaults(ppd);
6996 * Set VL15 credits. Use cached value from verify cap interrupt.
6997 * In case of quick linkup or simulator, vl15 value will be set by
6998 * handle_linkup_change. VerifyCap interrupt handler will not be
6999 * called in those scenarios.
7001 if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
7002 set_up_vl15(dd, dd->vl15buf_cached);
7004 /* enforce link speed enabled */
7005 if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
7006 /* oops - current speed is not enabled, bounce */
7008 "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
7009 ppd->link_speed_active, ppd->link_speed_enabled);
7010 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
7011 OPA_LINKDOWN_REASON_SPEED_POLICY);
7012 set_link_state(ppd, HLS_DN_OFFLINE);
7018 * Several pieces of LNI information were cached for SMA in ppd.
7019 * Reset these on link down
7021 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7023 ppd->neighbor_guid = 0;
7024 ppd->neighbor_port_number = 0;
7025 ppd->neighbor_type = 0;
7026 ppd->neighbor_fm_security = 0;
7029 static const char * const link_down_reason_strs[] = {
7030 [OPA_LINKDOWN_REASON_NONE] = "None",
7031 [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7032 [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7033 [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7034 [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7035 [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7036 [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7037 [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7038 [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7039 [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7040 [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7041 [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7042 [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7043 [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7044 [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7045 [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7046 [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7047 [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7048 [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7049 [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7050 [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7051 [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7052 [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7053 [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7054 [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7055 [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7056 [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7057 [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7058 [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7059 [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7060 [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7061 [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7062 [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7063 "Excessive buffer overrun",
7064 [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7065 [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7066 [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7067 [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7068 [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7069 [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7070 [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7071 [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7072 "Local media not installed",
7073 [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7074 [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7075 [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7076 "End to end not installed",
7077 [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7078 [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7079 [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7080 [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7081 [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7082 [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7085 /* return the neighbor link down reason string */
7086 static const char *link_down_reason_str(u8 reason)
7088 const char *str = NULL;
7090 if (reason < ARRAY_SIZE(link_down_reason_strs))
7091 str = link_down_reason_strs[reason];
7099 * Handle a link down interrupt from the 8051.
7101 * This is a work-queue function outside of the interrupt.
7103 void handle_link_down(struct work_struct *work)
7105 u8 lcl_reason, neigh_reason = 0;
7106 u8 link_down_reason;
7107 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7110 static const char ldr_str[] = "Link down reason: ";
7112 if ((ppd->host_link_state &
7113 (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7114 ppd->port_type == PORT_TYPE_FIXED)
7115 ppd->offline_disabled_reason =
7116 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7118 /* Go offline first, then deal with reading/writing through 8051 */
7119 was_up = !!(ppd->host_link_state & HLS_UP);
7120 set_link_state(ppd, HLS_DN_OFFLINE);
7121 xchg(&ppd->is_link_down_queued, 0);
7125 /* link down reason is only valid if the link was up */
7126 read_link_down_reason(ppd->dd, &link_down_reason);
7127 switch (link_down_reason) {
7128 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7129 /* the link went down, no idle message reason */
7130 dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7133 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7135 * The neighbor reason is only valid if an idle message
7136 * was received for it.
7138 read_planned_down_reason_code(ppd->dd, &neigh_reason);
7139 dd_dev_info(ppd->dd,
7140 "%sNeighbor link down message %d, %s\n",
7141 ldr_str, neigh_reason,
7142 link_down_reason_str(neigh_reason));
7144 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7145 dd_dev_info(ppd->dd,
7146 "%sHost requested link to go offline\n",
7150 dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7151 ldr_str, link_down_reason);
7156 * If no reason, assume peer-initiated but missed
7157 * LinkGoingDown idle flits.
7159 if (neigh_reason == 0)
7160 lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7162 /* went down while polling or going up */
7163 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7166 set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7168 /* inform the SMA when the link transitions from up to down */
7169 if (was_up && ppd->local_link_down_reason.sma == 0 &&
7170 ppd->neigh_link_down_reason.sma == 0) {
7171 ppd->local_link_down_reason.sma =
7172 ppd->local_link_down_reason.latest;
7173 ppd->neigh_link_down_reason.sma =
7174 ppd->neigh_link_down_reason.latest;
7177 reset_neighbor_info(ppd);
7179 /* disable the port */
7180 clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7183 * If there is no cable attached, turn the DC off. Otherwise,
7184 * start the link bring up.
7186 if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7187 dc_shutdown(ppd->dd);
7192 void handle_link_bounce(struct work_struct *work)
7194 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7198 * Only do something if the link is currently up.
7200 if (ppd->host_link_state & HLS_UP) {
7201 set_link_state(ppd, HLS_DN_OFFLINE);
7204 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7205 __func__, link_state_name(ppd->host_link_state));
7210 * Mask conversion: Capability exchange to Port LTP. The capability
7211 * exchange has an implicit 16b CRC that is mandatory.
7213 static int cap_to_port_ltp(int cap)
7215 int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7217 if (cap & CAP_CRC_14B)
7218 port_ltp |= PORT_LTP_CRC_MODE_14;
7219 if (cap & CAP_CRC_48B)
7220 port_ltp |= PORT_LTP_CRC_MODE_48;
7221 if (cap & CAP_CRC_12B_16B_PER_LANE)
7222 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
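/*
 * Example: cap == (CAP_CRC_14B | CAP_CRC_48B) returns PORT_LTP_CRC_MODE_16
 * | PORT_LTP_CRC_MODE_14 | PORT_LTP_CRC_MODE_48; the 16b mode is always
 * included because the capability exchange carries it implicitly.
 */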
7228 * Convert an OPA Port LTP mask to capability mask
7230 int port_ltp_to_cap(int port_ltp)
7234 if (port_ltp & PORT_LTP_CRC_MODE_14)
7235 cap_mask |= CAP_CRC_14B;
7236 if (port_ltp & PORT_LTP_CRC_MODE_48)
7237 cap_mask |= CAP_CRC_48B;
7238 if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7239 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7245 * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7247 static int lcb_to_port_ltp(int lcb_crc)
7251 if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7252 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7253 else if (lcb_crc == LCB_CRC_48B)
7254 port_ltp = PORT_LTP_CRC_MODE_48;
7255 else if (lcb_crc == LCB_CRC_14B)
7256 port_ltp = PORT_LTP_CRC_MODE_14;
7258 port_ltp = PORT_LTP_CRC_MODE_16;
7263 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7265 if (ppd->pkeys[2] != 0) {
7266 ppd->pkeys[2] = 0;
7267 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7268 hfi1_event_pkey_change(ppd->dd, ppd->port);
7273 * Convert the given link width to the OPA link width bitmask.
7275 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7280 * Simulator and quick linkup do not set the width.
7281 * Just set it to 4x without complaint.
7283 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7284 return OPA_LINK_WIDTH_4X;
7285 return 0; /* no lanes up */
7286 case 1: return OPA_LINK_WIDTH_1X;
7287 case 2: return OPA_LINK_WIDTH_2X;
7288 case 3: return OPA_LINK_WIDTH_3X;
7290 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7293 case 4: return OPA_LINK_WIDTH_4X;
7298 * Do a population count on the bottom nibble.
7300 static const u8 bit_counts[16] = {
7301 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7304 static inline u8 nibble_to_count(u8 nibble)
7306 return bit_counts[nibble & 0xf];
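/*
 * Example: nibble_to_count(0xb) looks up 0b1011 and returns 3 active
 * lanes; the 16-entry table is a loop-free popcount for a 4-bit value.
 */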
7310 * Read the active lane information from the 8051 registers and return
7313 * Active lane information is found in these 8051 registers:
7317 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7323 u8 tx_polarity_inversion;
7324 u8 rx_polarity_inversion;
7327 /* read the active lanes */
7328 read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7329 &rx_polarity_inversion, &max_rate);
7330 read_local_lni(dd, &enable_lane_rx);
7332 /* convert to counts */
7333 tx = nibble_to_count(enable_lane_tx);
7334 rx = nibble_to_count(enable_lane_rx);
7337 * Set link_speed_active here, overriding what was set in
7338 * handle_verify_cap(). The ASIC 8051 firmware does not correctly
7339 * set the max_rate field in handle_verify_cap until v0.19.
7341 if ((dd->icode == ICODE_RTL_SILICON) &&
7342 (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7343 /* max_rate: 0 = 12.5G, 1 = 25G */
7344 switch (max_rate) {
7345 case 0:
7346 dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7347 break;
7348 default:
7349 dd_dev_err(dd,
7350 "%s: unexpected max rate %d, using 25Gb\n",
7351 __func__, (int)max_rate);
7352 /* fall through */
7353 case 1:
7354 dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7359 dd_dev_info(dd,
7360 "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7361 enable_lane_tx, tx, enable_lane_rx, rx);
7362 *tx_width = link_width_to_bits(dd, tx);
7363 *rx_width = link_width_to_bits(dd, rx);
7367 * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7368 * Valid after the end of VerifyCap and during LinkUp. Does not change
7369 * after link up. I.e. look elsewhere for downgrade information.
7372 * + bits [7:4] contain the number of active transmitters
7373 * + bits [3:0] contain the number of active receivers
7374 * These are numbers 1 through 4 and can be different values if the
7375 * link is asymmetric.
7377 * verify_cap_local_fm_link_width[0] retains its original value.
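*
* Editorial worked example (not from the original source): an
* asymmetric link with 4 active transmitters and 3 active receivers
* reads back 0x43 in verify_cap_local_fm_link_width[1], i.e.
* widths == 0x4300 below, giving tx == 4 and rx == 3.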
7379 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7380 u16 *rx_width)
7383 u8 misc_bits, local_flags;
7384 u16 active_tx, active_rx;
7386 read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7387 tx = widths >> 12;
7388 rx = (widths >> 8) & 0xf;
7390 *tx_width = link_width_to_bits(dd, tx);
7391 *rx_width = link_width_to_bits(dd, rx);
7393 /* print the active widths */
7394 get_link_widths(dd, &active_tx, &active_rx);
7398 * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7399 * hardware information when the link first comes up.
7401 * The link width is not available until after VerifyCap.AllFramesReceived
7402 * (the trigger for handle_verify_cap), so this is outside that routine
7403 * and should be called when the 8051 signals linkup.
7405 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7407 u16 tx_width, rx_width;
7409 /* get end-of-LNI link widths */
7410 get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7412 /* use tx_width as the link is supposed to be symmetric on link up */
7413 ppd->link_width_active = tx_width;
7414 /* link width downgrade active (LWD.A) starts out matching LW.A */
7415 ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7416 ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7417 /* per OPA spec, on link up LWD.E resets to LWD.S */
7418 ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7419 /* cache the active egress rate (units [10^6 bits/sec]) */
7420 ppd->current_egress_rate = active_egress_rate(ppd);
7424 * Handle a verify capabilities interrupt from the 8051.
7426 * This is a work-queue function outside of the interrupt.
7428 void handle_verify_cap(struct work_struct *work)
7430 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7431 link_vc_work);
7432 struct hfi1_devdata *dd = ppd->dd;
7434 u8 power_management;
7444 u16 active_tx, active_rx;
7445 u8 partner_supported_crc;
7449 set_link_state(ppd, HLS_VERIFY_CAP);
7451 lcb_shutdown(dd, 0);
7452 adjust_lcb_for_fpga_serdes(dd);
7454 read_vc_remote_phy(dd, &power_management, &continuous);
7455 read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7456 &partner_supported_crc);
7457 read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7458 read_remote_device_id(dd, &device_id, &device_rev);
7460 /* print the active widths */
7461 get_link_widths(dd, &active_tx, &active_rx);
7462 dd_dev_info(dd,
7463 "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7464 (int)power_management, (int)continuous);
7465 dd_dev_info(dd,
7466 "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7467 (int)vau, (int)z, (int)vcu, (int)vl15buf,
7468 (int)partner_supported_crc);
7469 dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7470 (u32)remote_tx_rate, (u32)link_widths);
7471 dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7472 (u32)device_id, (u32)device_rev);
7474 * The peer vAU value just read is the peer receiver value. HFI does
7475 * not support a transmit vAU of 0 (AU == 8). We advertised that
7476 * with Z=1 in the fabric capabilities sent to the peer. The peer
7477 * will see our Z=1, and, if it advertised a vAU of 0, will move its
7478 * receive to vAU of 1 (AU == 16). Do the same here. We do not care
7479 * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7480 * subject to the Z value exception.
7482 if (vau == 0)
7483 vau = 1;
7484 set_up_vau(dd, vau);
7487 * Set VL15 credits to 0 in global credit register. Cache remote VL15
7488 * credits value and wait for link-up interrupt to set it.
7490 set_up_vl15(dd, 0);
7491 dd->vl15buf_cached = vl15buf;
7493 /* set up the LCB CRC mode */
7494 crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7496 /* order is important: use the lowest bit in common */
7497 if (crc_mask & CAP_CRC_14B)
7498 crc_val = LCB_CRC_14B;
7499 else if (crc_mask & CAP_CRC_48B)
7500 crc_val = LCB_CRC_48B;
7501 else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7502 crc_val = LCB_CRC_12B_16B_PER_LANE;
7504 crc_val = LCB_CRC_16B;
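/*
 * Editorial worked example (not from the original source): if our
 * enabled modes are 14b and 48b while the peer supports 48b and
 * per-lane, then crc_mask == CAP_CRC_48B and the link runs with
 * LCB_CRC_48B; with no bits in common both sides fall back to the
 * mandatory 16b CRC.
 */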
7506 dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7507 write_csr(dd, DC_LCB_CFG_CRC_MODE,
7508 (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7510 /* set (14b only) or clear sideband credit */
7511 reg = read_csr(dd, SEND_CM_CTRL);
7512 if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7513 write_csr(dd, SEND_CM_CTRL,
7514 reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7516 write_csr(dd, SEND_CM_CTRL,
7517 reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7520 ppd->link_speed_active = 0; /* invalid value */
7521 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7522 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7523 switch (remote_tx_rate) {
7524 case 0:
7525 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7526 break;
7527 case 1:
7528 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7529 break;
7532 /* actual rate is highest bit of the ANDed rates */
7533 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7535 if (rate & 2)
7536 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7537 else if (rate & 1)
7538 ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7540 if (ppd->link_speed_active == 0) {
7541 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7542 __func__, (int)remote_tx_rate);
7543 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7547 * Cache the values of the supported, enabled, and active
7548 * LTP CRC modes to return in 'portinfo' queries. But the bit
7549 * flags that are returned in the portinfo query differ from
7550 * what's in the link_crc_mask, crc_sizes, and crc_val
7551 * variables. Convert these here.
7553 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7554 /* supported crc modes */
7555 ppd->port_ltp_crc_mode |=
7556 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7557 /* enabled crc modes */
7558 ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7559 /* active crc mode */
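/*
 * Editorial note (not from the original source): after the three
 * ORs above, port_ltp_crc_mode holds supported modes in bits
 * [11:8], enabled modes in bits [7:4] and the active mode in bits
 * [3:0], which is the layout reported in 'portinfo'.
 */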
7561 /* set up the remote credit return table */
7562 assign_remote_cm_au_table(dd, vcu);
7565 * The LCB is reset on entry to handle_verify_cap(), so this must
7566 * be applied on every link up.
7568 * Adjust LCB error kill enable to kill the link if
7569 * these RBUF errors are seen:
7570 * REPLAY_BUF_MBE_SMASK
7571 * FLIT_INPUT_BUF_MBE_SMASK
7573 if (is_ax(dd)) { /* fixed in B0 */
7574 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7575 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7576 | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7577 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7580 /* pull LCB fifos out of reset - all fifo clocks must be stable */
7581 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7583 /* give 8051 access to the LCB CSRs */
7584 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7585 set_8051_lcb_access(dd);
7587 /* tell the 8051 to go to LinkUp */
7588 set_link_state(ppd, HLS_GOING_UP);
7592 * apply_link_downgrade_policy - Apply the link width downgrade enabled
7593 * policy against the current active link widths.
7594 * @ppd: info of physical Hfi port
7595 * @refresh_widths: True indicates link downgrade event
7596 * @return: True indicates a successful link downgrade. False indicates
7597 * link downgrade event failed and the link will bounce back to
7598 * default link width.
7600 * Called when the enabled policy changes or the active link widths
7601 * change.
7602 * Refresh_widths indicates that a link downgrade occurred. The
7603 * link_downgraded variable is set by refresh_widths and
7604 * determines the success/failure of the policy application.
7606 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7607 bool refresh_widths)
7613 bool link_downgraded = refresh_widths;
7615 /* use the hls lock to avoid a race with actual link up */
7616 tries = 0;
7617 retry:
7618 mutex_lock(&ppd->hls_lock);
7619 /* only apply if the link is up */
7620 if (ppd->host_link_state & HLS_DOWN) {
7621 /* still going up; wait and retry */
7622 if (ppd->host_link_state & HLS_GOING_UP) {
7623 if (++tries < 1000) {
7624 mutex_unlock(&ppd->hls_lock);
7625 usleep_range(100, 120); /* arbitrary */
7626 goto retry;
7628 dd_dev_err(ppd->dd,
7629 "%s: giving up waiting for link state change\n",
7630 __func__);
7635 lwde = ppd->link_width_downgrade_enabled;
7637 if (refresh_widths) {
7638 get_link_widths(ppd->dd, &tx, &rx);
7639 ppd->link_width_downgrade_tx_active = tx;
7640 ppd->link_width_downgrade_rx_active = rx;
7643 if (ppd->link_width_downgrade_tx_active == 0 ||
7644 ppd->link_width_downgrade_rx_active == 0) {
7645 /* the 8051 reported a dead link as a downgrade */
7646 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7647 link_downgraded = false;
7648 } else if (lwde == 0) {
7649 /* downgrade is disabled */
7651 /* bounce if not at starting active width */
7652 if ((ppd->link_width_active !=
7653 ppd->link_width_downgrade_tx_active) ||
7654 (ppd->link_width_active !=
7655 ppd->link_width_downgrade_rx_active)) {
7656 dd_dev_err(ppd->dd,
7657 "Link downgrade is disabled and link has downgraded, downing link\n");
7658 dd_dev_err(ppd->dd,
7659 " original 0x%x, tx active 0x%x, rx active 0x%x\n",
7660 ppd->link_width_active,
7661 ppd->link_width_downgrade_tx_active,
7662 ppd->link_width_downgrade_rx_active);
7664 link_downgraded = false;
7666 } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7667 (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7668 /* Tx or Rx is outside the enabled policy */
7669 dd_dev_err(ppd->dd,
7670 "Link is outside of downgrade allowed, downing link\n");
7671 dd_dev_err(ppd->dd,
7672 " enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7673 lwde, ppd->link_width_downgrade_tx_active,
7674 ppd->link_width_downgrade_rx_active);
7676 link_downgraded = false;
7680 mutex_unlock(&ppd->hls_lock);
7683 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7684 OPA_LINKDOWN_REASON_WIDTH_POLICY);
7685 set_link_state(ppd, HLS_DN_OFFLINE);
7689 return link_downgraded;
7693 * Handle a link downgrade interrupt from the 8051.
7695 * This is a work-queue function outside of the interrupt.
7697 void handle_link_downgrade(struct work_struct *work)
7699 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7700 link_downgrade_work);
7702 dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7703 if (apply_link_downgrade_policy(ppd, true))
7704 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7707 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7709 return flag_string(buf, buf_len, flags, dcc_err_flags,
7710 ARRAY_SIZE(dcc_err_flags));
7713 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7715 return flag_string(buf, buf_len, flags, lcb_err_flags,
7716 ARRAY_SIZE(lcb_err_flags));
7719 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7721 return flag_string(buf, buf_len, flags, dc8051_err_flags,
7722 ARRAY_SIZE(dc8051_err_flags));
7725 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7727 return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7728 ARRAY_SIZE(dc8051_info_err_flags));
7731 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7733 return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7734 ARRAY_SIZE(dc8051_info_host_msg_flags));
7737 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7739 struct hfi1_pportdata *ppd = dd->pport;
7740 u64 info, err, host_msg;
7741 int queue_link_down = 0;
7744 /* look at the flags */
7745 if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7746 /* 8051 information set by firmware */
7747 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7748 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7749 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7750 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7751 host_msg = (info >>
7752 DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7753 & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7756 * Handle error flags.
7758 if (err & FAILED_LNI) {
7760 * LNI error indications are cleared by the 8051
7761 * only when starting polling. Only pay attention
7762 * to them when in the states that occur during
7763 * link negotiation.
7765 if (ppd->host_link_state
7766 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7767 queue_link_down = 1;
7768 dd_dev_info(dd, "Link error: %s\n",
7769 dc8051_info_err_string(buf,
7770 sizeof(buf),
7771 err & FAILED_LNI));
7774 err &= ~(u64)FAILED_LNI;
7776 /* unknown frames can happen during LNI, just count */
7777 if (err & UNKNOWN_FRAME) {
7778 ppd->unknown_frame_count++;
7779 err &= ~(u64)UNKNOWN_FRAME;
7781 if (err)
7782 /* report remaining errors, but do not do anything */
7783 dd_dev_err(dd, "8051 info error: %s\n",
7784 dc8051_info_err_string(buf, sizeof(buf),
7785 err));
7789 * Handle host message flags.
7791 if (host_msg & HOST_REQ_DONE) {
7793 * Presently, the driver does a busy wait for
7794 * host requests to complete. This is only an
7795 * informational message.
7796 * NOTE: The 8051 clears the host message
7797 * information *on the next 8051 command*.
7798 * Therefore, when linkup is achieved,
7799 * this flag will still be set.
7801 host_msg &= ~(u64)HOST_REQ_DONE;
7803 if (host_msg & BC_SMA_MSG) {
7804 queue_work(ppd->link_wq, &ppd->sma_message_work);
7805 host_msg &= ~(u64)BC_SMA_MSG;
7807 if (host_msg & LINKUP_ACHIEVED) {
7808 dd_dev_info(dd, "8051: Link up\n");
7809 queue_work(ppd->link_wq, &ppd->link_up_work);
7810 host_msg &= ~(u64)LINKUP_ACHIEVED;
7812 if (host_msg & EXT_DEVICE_CFG_REQ) {
7813 handle_8051_request(ppd);
7814 host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7816 if (host_msg & VERIFY_CAP_FRAME) {
7817 queue_work(ppd->link_wq, &ppd->link_vc_work);
7818 host_msg &= ~(u64)VERIFY_CAP_FRAME;
7820 if (host_msg & LINK_GOING_DOWN) {
7821 const char *extra = "";
7822 /* no downgrade action needed if going down */
7823 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7824 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7825 extra = " (ignoring downgrade)";
7827 dd_dev_info(dd, "8051: Link down%s\n", extra);
7828 queue_link_down = 1;
7829 host_msg &= ~(u64)LINK_GOING_DOWN;
7831 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7832 queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7833 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7835 if (host_msg)
7836 /* report remaining messages, but do not do anything */
7837 dd_dev_info(dd, "8051 info host message: %s\n",
7838 dc8051_info_host_msg_string(buf,
7839 sizeof(buf),
7840 host_msg));
7843 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7845 if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7847 * Lost the 8051 heartbeat. If this happens, we
7848 * receive constant interrupts about it. Disable
7849 * the interrupt after the first.
7851 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7852 write_csr(dd, DC_DC8051_ERR_EN,
7853 read_csr(dd, DC_DC8051_ERR_EN) &
7854 ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7856 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7858 if (reg)
7859 /* report the error, but do not do anything */
7860 dd_dev_err(dd, "8051 error: %s\n",
7861 dc8051_err_string(buf, sizeof(buf), reg));
7864 if (queue_link_down) {
7866 * if the link is already going down or disabled, do not
7867 * queue another. If there's a link down entry already
7868 * queued, don't queue another one.
7870 if ((ppd->host_link_state &
7871 (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7872 ppd->link_enabled == 0) {
7873 dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7874 __func__, ppd->host_link_state,
7875 ppd->link_enabled);
7877 if (xchg(&ppd->is_link_down_queued, 1) == 1)
7878 dd_dev_info(dd,
7879 "%s: link down request already queued\n",
7880 __func__);
7881 else
7882 queue_work(ppd->link_wq, &ppd->link_down_work);
7887 static const char * const fm_config_txt[] = {
7889 "BadHeadDist: Distance violation between two head flits",
7891 "BadTailDist: Distance violation between two tail flits",
7893 "BadCtrlDist: Distance violation between two credit control flits",
7895 "BadCrdAck: Credits return for unsupported VL",
7897 "UnsupportedVLMarker: Received VL Marker",
7899 "BadPreempt: Exceeded the preemption nesting level",
7901 "BadControlFlit: Received unsupported control flit",
7904 "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7907 static const char * const port_rcv_txt[] = {
7909 "BadPktLen: Illegal PktLen",
7911 "PktLenTooLong: Packet longer than PktLen",
7913 "PktLenTooShort: Packet shorter than PktLen",
7915 "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7917 "BadDLID: Illegal DLID (0, doesn't match HFI)",
7919 "BadL2: Illegal L2 opcode",
7921 "BadSC: Unsupported SC",
7923 "BadRC: Illegal RC",
7925 "PreemptError: Preempting with same VL",
7927 "PreemptVL15: Preempting a VL15 packet",
7930 #define OPA_LDR_FMCONFIG_OFFSET 16
7931 #define OPA_LDR_PORTRCV_OFFSET 0
7932 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7934 u64 info, hdr0, hdr1;
7937 struct hfi1_pportdata *ppd = dd->pport;
7941 if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7942 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7943 info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7944 dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7945 /* set status bit */
7946 dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7948 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7951 if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7952 struct hfi1_pportdata *ppd = dd->pport;
7953 /* this counter saturates at (2^32) - 1 */
7954 if (ppd->link_downed < (u32)UINT_MAX)
7955 ppd->link_downed++;
7956 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7959 if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7960 u8 reason_valid = 1;
7962 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7963 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7964 dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7965 /* set status bit */
7966 dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7968 switch (info) {
7969 case 0:
7970 case 1:
7971 case 2:
7972 case 3:
7973 case 4:
7974 case 5:
7975 case 6:
7976 extra = fm_config_txt[info];
7977 break;
7978 case 8:
7979 extra = fm_config_txt[info];
7980 if (ppd->port_error_action &
7981 OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7982 do_bounce = 1;
7984 * lcl_reason cannot be derived from info
7985 * for this error
7987 lcl_reason =
7988 OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7991 default:
7992 reason_valid = 0;
7993 snprintf(buf, sizeof(buf), "reserved%lld", info);
7994 extra = buf;
7998 if (reason_valid && !do_bounce) {
7999 do_bounce = ppd->port_error_action &
8000 (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
8001 lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
8004 /* just report this */
8005 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
8006 extra);
8007 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
8010 if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8011 u8 reason_valid = 1;
8013 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8014 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8015 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8016 if (!(dd->err_info_rcvport.status_and_code &
8017 OPA_EI_STATUS_SMASK)) {
8018 dd->err_info_rcvport.status_and_code =
8019 info & OPA_EI_CODE_SMASK;
8020 /* set status bit */
8021 dd->err_info_rcvport.status_and_code |=
8022 OPA_EI_STATUS_SMASK;
8024 * save first 2 flits in the packet that caused
8025 * the error
8027 dd->err_info_rcvport.packet_flit1 = hdr0;
8028 dd->err_info_rcvport.packet_flit2 = hdr1;
8030 switch (info) {
8031 case 1:
8032 case 2:
8033 case 3:
8034 case 4:
8035 case 5:
8036 case 6:
8037 case 7:
8038 case 9:
8039 case 11:
8040 case 12:
8041 extra = port_rcv_txt[info];
8042 break;
8043 default:
8044 reason_valid = 0;
8045 snprintf(buf, sizeof(buf), "reserved%lld", info);
8046 extra = buf;
8050 if (reason_valid && !do_bounce) {
8051 do_bounce = ppd->port_error_action &
8052 (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8053 lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8056 /* just report this */
8057 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8058 " hdr0 0x%llx, hdr1 0x%llx\n",
8059 extra, hdr0, hdr1);
8061 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8064 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8065 /* informative only */
8066 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8067 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8069 if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8070 /* informative only */
8071 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8072 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8075 if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8076 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8078 /* report any remaining errors */
8079 if (reg)
8080 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8081 dcc_err_string(buf, sizeof(buf), reg));
8083 if (lcl_reason == 0)
8084 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8086 if (do_bounce) {
8087 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8088 __func__);
8089 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8090 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8094 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8098 dd_dev_info(dd, "LCB Error: %s\n",
8099 lcb_err_string(buf, sizeof(buf), reg));
8103 * CCE block DC interrupt. Source is < 8.
8105 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8107 const struct err_reg_info *eri = &dc_errs[source];
8110 interrupt_clear_down(dd, 0, eri);
8111 } else if (source == 3 /* dc_lbm_int */) {
8113 * This indicates that a parity error has occurred on the
8114 * address/control lines presented to the LBM. The error
8115 * is a single pulse, there is no associated error flag,
8116 * and it is non-maskable. This is because if a parity
8117 * error occurs on the request the request is dropped.
8118 * This should never occur, but it is nice to know if it
8119 * ever does.
8121 dd_dev_err(dd, "Parity error in DC LBM block\n");
8123 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8128 * TX block send credit interrupt. Source is < 160.
8130 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8132 sc_group_release_update(dd, source);
8136 * TX block SDMA interrupt. Source is < 48.
8138 * SDMA interrupts are grouped by type:
8140 * 0 - N-1 = SDma
8141 * N - 2N-1 = SDmaProgress
8142 * 2N - 3N-1 = SDmaIdle
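*
* Editorial worked example (not from the original source): with
* N == TXE_NUM_SDMA_ENGINES, source == N + 5 decodes below to
* what == 1 (SDmaProgress) for engine which == 5.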
8144 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8146 /* what interrupt */
8147 unsigned int what = source / TXE_NUM_SDMA_ENGINES;
8149 unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8151 #ifdef CONFIG_SDMA_VERBOSITY
8152 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8153 slashstrip(__FILE__), __LINE__, __func__);
8154 sdma_dumpstate(&dd->per_sdma[which]);
8157 if (likely(what < 3 && which < dd->num_sdma)) {
8158 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8160 /* should not happen */
8161 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8166 * is_rcv_avail_int() - User receive context available IRQ handler
8168 * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8170 * RX block receive available interrupt. Source is < 160.
8172 * This is the general interrupt handler for user (PSM) receive contexts,
8173 * and can only be used for non-threaded IRQs.
8175 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8177 struct hfi1_ctxtdata *rcd;
8180 if (likely(source < dd->num_rcv_contexts)) {
8181 rcd = hfi1_rcd_get_by_index(dd, source);
8183 handle_user_interrupt(rcd);
8184 hfi1_rcd_put(rcd);
8185 return; /* OK */
8187 /* received an interrupt, but no rcd */
8188 err_detail = "dataless";
8190 /* received an interrupt, but are not using that context */
8191 err_detail = "out of range";
8193 dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8194 err_detail, source);
8198 * is_rcv_urgent_int() - User receive context urgent IRQ handler
8200 * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8202 * RX block receive urgent interrupt. Source is < 160.
8204 * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8206 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8208 struct hfi1_ctxtdata *rcd;
8211 if (likely(source < dd->num_rcv_contexts)) {
8212 rcd = hfi1_rcd_get_by_index(dd, source);
8214 handle_user_interrupt(rcd);
8215 hfi1_rcd_put(rcd);
8216 return; /* OK */
8218 /* received an interrupt, but no rcd */
8219 err_detail = "dataless";
8221 /* received an interrupt, but are not using that context */
8222 err_detail = "out of range";
8224 dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8225 err_detail, source);
8229 * Reserved range interrupt. Should not be called in normal operation.
8231 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8235 dd_dev_err(dd, "unexpected %s interrupt\n",
8236 is_reserved_name(name, sizeof(name), source));
8239 static const struct is_table is_table[] = {
8242 * name func interrupt func
8244 { IS_GENERAL_ERR_START, IS_GENERAL_ERR_END,
8245 is_misc_err_name, is_misc_err_int },
8246 { IS_SDMAENG_ERR_START, IS_SDMAENG_ERR_END,
8247 is_sdma_eng_err_name, is_sdma_eng_err_int },
8248 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8249 is_sendctxt_err_name, is_sendctxt_err_int },
8250 { IS_SDMA_START, IS_SDMA_IDLE_END,
8251 is_sdma_eng_name, is_sdma_eng_int },
8252 { IS_VARIOUS_START, IS_VARIOUS_END,
8253 is_various_name, is_various_int },
8254 { IS_DC_START, IS_DC_END,
8255 is_dc_name, is_dc_int },
8256 { IS_RCVAVAIL_START, IS_RCVAVAIL_END,
8257 is_rcv_avail_name, is_rcv_avail_int },
8258 { IS_RCVURGENT_START, IS_RCVURGENT_END,
8259 is_rcv_urgent_name, is_rcv_urgent_int },
8260 { IS_SENDCREDIT_START, IS_SENDCREDIT_END,
8261 is_send_credit_name, is_send_credit_int},
8262 { IS_RESERVED_START, IS_RESERVED_END,
8263 is_reserved_name, is_reserved_int},
8267 * Interrupt source interrupt - called when the given source has an interrupt.
8268 * Source is a bit index into an array of 64-bit integers.
8270 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8272 const struct is_table *entry;
8274 /* avoids a double compare by walking the table in-order */
8275 for (entry = &is_table[0]; entry->is_name; entry++) {
8276 if (source <= entry->end) {
8277 trace_hfi1_interrupt(dd, entry, source);
8278 entry->is_int(dd, source - entry->start);
8282 /* fell off the end */
8283 dd_dev_err(dd, "invalid interrupt source %u\n", source);
8287 * general_interrupt() - General interrupt handler
8288 * @irq: MSIx IRQ vector
8289 * @data: hfi1 devdata
8291 * This is able to correctly handle all non-threaded interrupts. Receive
8292 * context DATA IRQs are threaded and are not supported by this handler.
8295 irqreturn_t general_interrupt(int irq, void *data)
8297 struct hfi1_devdata *dd = data;
8298 u64 regs[CCE_NUM_INT_CSRS];
8301 irqreturn_t handled = IRQ_NONE;
8303 this_cpu_inc(*dd->int_counter);
8305 /* phase 1: scan and clear all handled interrupts */
8306 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8307 if (dd->gi_mask[i] == 0) {
8308 regs[i] = 0; /* used later */
8311 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8312 dd->gi_mask[i];
8313 /* only clear if anything is set */
8314 if (regs[i])
8315 write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8318 /* phase 2: call the appropriate handler */
8319 for_each_set_bit(bit, (unsigned long *)®s[0],
8320 CCE_NUM_INT_CSRS * 64) {
8321 is_interrupt(dd, bit);
8322 handled = IRQ_HANDLED;
8328 irqreturn_t sdma_interrupt(int irq, void *data)
8330 struct sdma_engine *sde = data;
8331 struct hfi1_devdata *dd = sde->dd;
8334 #ifdef CONFIG_SDMA_VERBOSITY
8335 dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8336 slashstrip(__FILE__), __LINE__, __func__);
8337 sdma_dumpstate(sde);
8340 this_cpu_inc(*dd->int_counter);
8342 /* This read_csr is really bad in the hot path */
8343 status = read_csr(dd,
8344 CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8345 & sde->imask;
8346 if (likely(status)) {
8347 /* clear the interrupt(s) */
8348 write_csr(dd,
8349 CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8350 status);
8352 /* handle the interrupt(s) */
8353 sdma_engine_interrupt(sde, status);
8355 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8356 sde->this_idx);
8362 * Clear the receive interrupt. Use a read of the interrupt clear CSR
8363 * to ensure that the write completed. This does NOT guarantee that
8364 * queued DMA writes to memory from the chip are pushed.
8366 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8368 struct hfi1_devdata *dd = rcd->dd;
8369 u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8371 write_csr(dd, addr, rcd->imask);
8372 /* force the above write on the chip and get a value back */
8373 (void)read_csr(dd, addr);
8376 /* force the receive interrupt */
8377 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8379 write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8383 * Return non-zero if a packet is present.
8385 * This routine is called when rechecking for packets after the RcvAvail
8386 * interrupt has been cleared down. First, do a quick check of memory for
8387 * a packet present. If not found, use an expensive CSR read of the context
8388 * tail to determine the actual tail. The CSR read is necessary because there
8389 * is no method to push pending DMAs to memory other than an interrupt and we
8390 * are trying to determine if we need to force an interrupt.
8392 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8397 if (!rcd->rcvhdrtail_kvaddr)
8398 present = (rcd->seq_cnt ==
8399 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8400 else /* is RDMA rtail */
8401 present = (rcd->head != get_rcvhdrtail(rcd));
8403 if (present)
8404 return 1;
8406 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8407 tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8408 return rcd->head != tail;
8412 * Receive packet IRQ handler. This routine expects to be on its own IRQ.
8413 * This routine will try to handle packets immediately (latency), but if
8414 * it finds too many, it will invoke the thread handler (bandwidth). The
8415 * chip receive interrupt is *not* cleared down until this or the thread (if
8416 * invoked) is finished. The intent is to avoid extra interrupts while we
8417 * are processing packets anyway.
8419 irqreturn_t receive_context_interrupt(int irq, void *data)
8421 struct hfi1_ctxtdata *rcd = data;
8422 struct hfi1_devdata *dd = rcd->dd;
8426 trace_hfi1_receive_interrupt(dd, rcd);
8427 this_cpu_inc(*dd->int_counter);
8428 aspm_ctx_disable(rcd);
8430 /* receive interrupt remains blocked while processing packets */
8431 disposition = rcd->do_interrupt(rcd, 0);
8434 * Too many packets were seen while processing packets in this
8435 * IRQ handler. Invoke the handler thread. The receive interrupt
8438 if (disposition == RCV_PKT_LIMIT)
8439 return IRQ_WAKE_THREAD;
8442 * The packet processor detected no more packets. Clear the receive
8443 * interrupt and recheck for a packet that may have arrived
8444 * after the previous check and interrupt clear. If a packet arrived,
8445 * force another interrupt.
8447 clear_recv_intr(rcd);
8448 present = check_packet_present(rcd);
8449 if (present)
8450 force_recv_intr(rcd);
8452 return IRQ_HANDLED;
8456 * Receive packet thread handler. This expects to be invoked with the
8457 * receive interrupt still blocked.
8459 irqreturn_t receive_context_thread(int irq, void *data)
8461 struct hfi1_ctxtdata *rcd = data;
8464 /* receive interrupt is still blocked from the IRQ handler */
8465 (void)rcd->do_interrupt(rcd, 1);
8468 * The packet processor will only return if it detected no more
8469 * packets. Hold IRQs here so we can safely clear the interrupt and
8470 * recheck for a packet that may have arrived after the previous
8471 * check and the interrupt clear. If a packet arrived, force another
8474 local_irq_disable();
8475 clear_recv_intr(rcd);
8476 present = check_packet_present(rcd);
8477 if (present)
8478 force_recv_intr(rcd);
8479 local_irq_enable();
8481 return IRQ_HANDLED;
8484 /* ========================================================================= */
8486 u32 read_physical_state(struct hfi1_devdata *dd)
8490 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8491 return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8492 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8495 u32 read_logical_state(struct hfi1_devdata *dd)
8499 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8500 return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8501 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8504 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8508 reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8509 /* clear current state, set new state */
8510 reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8511 reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8512 write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8516 * Use the 8051 to read a LCB CSR.
8518 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8523 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8524 if (acquire_lcb_access(dd, 0) == 0) {
8525 *data = read_csr(dd, addr);
8526 release_lcb_access(dd, 0);
8532 /* register is an index of LCB registers: (offset - base) / 8 */
8533 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8534 ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8535 if (ret != HCMD_SUCCESS)
8536 return -EBUSY;
8537 return 0;
8541 * Provide a cache for some of the LCB registers in case the LCB is
8542 * unavailable.
8543 * (The LCB is unavailable in certain link states, for example.)
8550 static struct lcb_datum lcb_cache[] = {
8551 { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8552 { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8553 { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8556 static void update_lcb_cache(struct hfi1_devdata *dd)
8562 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8563 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8565 /* Update if we get good data */
8566 if (likely(ret != -EBUSY))
8567 lcb_cache[i].val = val;
8571 static int read_lcb_cache(u32 off, u64 *val)
8575 for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8576 if (lcb_cache[i].off == off) {
8577 *val = lcb_cache[i].val;
8582 pr_warn("%s bad offset 0x%x\n", __func__, off);
8587 * Read an LCB CSR. Access may not be in host control, so check.
8588 * Return 0 on success, -EBUSY on failure.
8590 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8592 struct hfi1_pportdata *ppd = dd->pport;
8594 /* if up, go through the 8051 for the value */
8595 if (ppd->host_link_state & HLS_UP)
8596 return read_lcb_via_8051(dd, addr, data);
8597 /* if going up or down, check the cache, otherwise, no access */
8598 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8599 if (read_lcb_cache(addr, data))
8600 return -EBUSY;
8601 return 0;
8604 /* otherwise, host has access */
8605 *data = read_csr(dd, addr);
8606 return 0;
8610 * Use the 8051 to write a LCB CSR.
8612 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8617 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8618 (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8619 if (acquire_lcb_access(dd, 0) == 0) {
8620 write_csr(dd, addr, data);
8621 release_lcb_access(dd, 0);
8627 /* register is an index of LCB registers: (offset - base) / 8 */
8628 regno = (addr - DC_LCB_CFG_RUN) >> 3;
8629 ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8630 if (ret != HCMD_SUCCESS)
8631 return -EBUSY;
8632 return 0;
8636 * Write an LCB CSR. Access may not be in host control, so check.
8637 * Return 0 on success, -EBUSY on failure.
8639 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8641 struct hfi1_pportdata *ppd = dd->pport;
8643 /* if up, go through the 8051 for the value */
8644 if (ppd->host_link_state & HLS_UP)
8645 return write_lcb_via_8051(dd, addr, data);
8646 /* if going up or down, no access */
8647 if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8648 return -EBUSY;
8649 /* otherwise, host has access */
8650 write_csr(dd, addr, data);
8651 return 0;
8656 * < 0 = Linux error, not able to get access
8657 * > 0 = 8051 command RETURN_CODE
8659 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8660 u64 *out_data)
8664 unsigned long timeout;
8666 hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8668 mutex_lock(&dd->dc8051_lock);
8670 /* We can't send any commands to the 8051 if it's in reset */
8671 if (dd->dc_shutdown) {
8672 return_code = -ENODEV;
8673 goto fail;
8677 * If an 8051 host command timed out previously, then the 8051 is
8678 * stuck.
8680 * On first timeout, attempt to reset and restart the entire DC
8681 * block (including 8051). (Is this too big of a hammer?)
8683 * If the 8051 times out a second time, the reset did not bring it
8684 * back to healthy life. In that case, fail any subsequent commands.
8686 if (dd->dc8051_timed_out) {
8687 if (dd->dc8051_timed_out > 1) {
8689 "Previous 8051 host command timed out, skipping command %u\n",
8691 return_code = -ENXIO;
8692 goto fail;
8699 * If there is no timeout, then the 8051 command interface is
8700 * waiting for a command.
8704 * When writing an LCB CSR, out_data contains the full value to
8705 * be written, while in_data contains the relative LCB
8706 * address in 7:0. Do the work here, rather than the caller,
8707 * of distributing the write data to where it needs to go:
8710 * 39:00 -> in_data[47:8]
8711 * 47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8712 * 63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8714 if (type == HCMD_WRITE_LCB_CSR) {
8715 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8716 /* must preserve COMPLETED - it is tied to hardware */
8717 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8718 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8719 reg |= ((((*out_data) >> 40) & 0xff) <<
8720 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8721 | ((((*out_data) >> 48) & 0xffff) <<
8722 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8723 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
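/*
 * Editorial worked example (not from the original source): for
 * *out_data == 0x1122334455667788, bits 39:0 (0x4455667788) are
 * merged into in_data[47:8], RETURN_CODE is written with 0x33
 * (bits 47:40) and RSP_DATA with 0x1122 (bits 63:48), matching
 * the layout described above.
 */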
8727 * Do two writes: the first to stabilize the type and req_data, the
8728 * second to activate.
8730 reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8731 << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8732 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8733 << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8734 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8735 reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8736 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8738 /* wait for completion, alternate: interrupt */
8739 timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8741 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8742 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8743 if (completed)
8744 break;
8745 if (time_after(jiffies, timeout)) {
8746 dd->dc8051_timed_out++;
8747 dd_dev_err(dd, "8051 host command %u timeout\n", type);
8748 if (out_data)
8749 *out_data = 0;
8750 return_code = -ETIMEDOUT;
8751 goto fail;
8753 udelay(2);
8757 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8758 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8759 if (type == HCMD_READ_LCB_CSR) {
8760 /* top 16 bits are in a different register */
8761 *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8762 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8763 << (48
8764 - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8767 return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8768 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8769 dd->dc8051_timed_out = 0;
8771 * Clear command for next user.
8773 write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8775 fail:
8776 mutex_unlock(&dd->dc8051_lock);
8777 return return_code;
8780 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8782 return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8785 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8786 u8 lane_id, u32 config_data)
8791 data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8792 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8793 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8794 ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8795 if (ret != HCMD_SUCCESS) {
8796 dd_dev_err(dd,
8797 "load 8051 config: field id %d, lane %d, err %d\n",
8798 (int)field_id, (int)lane_id, ret);
8800 return ret;
8804 * Read the 8051 firmware "registers". Use the RAM directly. Always
8805 * set the result, even on error.
8806 * Return 0 on success, -errno on failure
8808 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8815 /* address start depends on the lane_id */
8817 addr = (4 * NUM_GENERAL_FIELDS)
8818 + (lane_id * 4 * NUM_LANE_FIELDS);
8821 addr += field_id * 4;
8823 /* read is in 8-byte chunks, hardware will truncate the address down */
8824 ret = read_8051_data(dd, addr, 8, &big_data);
8827 /* extract the 4 bytes we want */
8828 if (addr & 0x4)
8829 *result = (u32)(big_data >> 32);
8830 else
8831 *result = (u32)big_data;
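/*
 * Editorial worked example (not from the original source): a
 * field at byte address 0xC has (addr & 0x4) set, so the upper
 * word (big_data >> 32) of the 8-byte-aligned read is used;
 * address 0x8 selects the lower word instead.
 */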
8834 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8835 __func__, lane_id, field_id);
8841 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8846 frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8847 | power_management << POWER_MANAGEMENT_SHIFT;
8848 return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8849 GENERAL_CONFIG, frame);
8852 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8853 u16 vl15buf, u8 crc_sizes)
8857 frame = (u32)vau << VAU_SHIFT
8858 | (u32)z << Z_SHIFT
8859 | (u32)vcu << VCU_SHIFT
8860 | (u32)vl15buf << VL15BUF_SHIFT
8861 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8862 return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8863 GENERAL_CONFIG, frame);
8866 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8867 u8 *flag_bits, u16 *link_widths)
8871 read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8872 &frame);
8873 *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8874 *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8875 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8878 static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8885 frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8886 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8887 | (u32)link_widths << LINK_WIDTH_SHIFT;
8888 return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8889 frame);
8892 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8897 frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8898 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8899 return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8902 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8907 read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8908 *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8909 *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8910 & REMOTE_DEVICE_REV_MASK;
8913 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8918 mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8919 read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8920 /* Clear, then set field */
8921 frame &= ~mask;
8922 frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8923 return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8924 frame);
8927 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8932 read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8933 *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8934 STS_FM_VERSION_MAJOR_MASK;
8935 *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8936 STS_FM_VERSION_MINOR_MASK;
8938 read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8939 *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8940 STS_FM_VERSION_PATCH_MASK;
8943 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8948 read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8949 *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8950 & POWER_MANAGEMENT_MASK;
8951 *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8952 & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8955 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8956 u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8960 read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8961 *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8962 *z = (frame >> Z_SHIFT) & Z_MASK;
8963 *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8964 *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8965 *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8968 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8974 read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8975 &frame);
8976 *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8977 & REMOTE_TX_RATE_MASK;
8978 *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8981 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8985 read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8986 *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8989 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8991 read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8994 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8996 read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8999 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
9005 if (dd->pport->host_link_state & HLS_UP) {
9006 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9007 &frame);
9008 if (ret == 0)
9009 *link_quality = (frame >> LINK_QUALITY_SHIFT)
9010 & LINK_QUALITY_MASK;
9014 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9018 read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9019 *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9022 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9026 read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9027 *ldr = (frame & 0xff);
9030 static int read_tx_settings(struct hfi1_devdata *dd,
9032 u8 *tx_polarity_inversion,
9033 u8 *rx_polarity_inversion,
9039 ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9040 *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9041 & ENABLE_LANE_TX_MASK;
9042 *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9043 & TX_POLARITY_INVERSION_MASK;
9044 *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9045 & RX_POLARITY_INVERSION_MASK;
9046 *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9047 return ret;
9050 static int write_tx_settings(struct hfi1_devdata *dd,
9052 u8 tx_polarity_inversion,
9053 u8 rx_polarity_inversion,
9058 /* no need to mask, all variable sizes match field widths */
9059 frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9060 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9061 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9062 | max_rate << MAX_RATE_SHIFT;
9063 return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9067 * Read an idle LCB message.
9069 * Returns 0 on success, -EINVAL on error
9071 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9075 ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9076 if (ret != HCMD_SUCCESS) {
9077 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9078 (u32)type, ret);
9079 return -EINVAL;
9081 dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9082 /* return only the payload as we already know the type */
9083 *data_out >>= IDLE_PAYLOAD_SHIFT;
9088 * Read an idle SMA message. To be done in response to a notification from
9091 * Returns 0 on success, -EINVAL on error
9093 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9095 return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9096 data);
9100 * Send an idle LCB message.
9102 * Returns 0 on success, -EINVAL on error
9104 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9108 dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9109 ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9110 if (ret != HCMD_SUCCESS) {
9111 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9112 data, ret);
9113 return -EINVAL;
9119 * Send an idle SMA message.
9121 * Returns 0 on success, -EINVAL on error
9123 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9127 data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9128 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9129 return send_idle_message(dd, data);
9133 * Initialize the LCB then do a quick link up. This may or may not be
9136 * return 0 on success, -errno on error
9138 static int do_quick_linkup(struct hfi1_devdata *dd)
9142 lcb_shutdown(dd, 0);
9145 /* LCB_CFG_LOOPBACK.VAL = 2 */
9146 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9147 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9148 IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9149 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9152 /* start the LCBs */
9153 /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9154 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9156 /* simulator only loopback steps */
9157 if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9158 /* LCB_CFG_RUN.EN = 1 */
9159 write_csr(dd, DC_LCB_CFG_RUN,
9160 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9162 ret = wait_link_transfer_active(dd, 10);
9163 if (ret)
9164 return ret;
9166 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9167 1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9172 * When doing quick linkup and not in loopback, both
9173 * sides must be done with LCB set-up before either
9174 * starts the quick linkup. Put a delay here so that
9175 * both sides can be started and have a chance to be
9176 * done with LCB set up before resuming.
9178 dd_dev_err(dd,
9179 "Pausing for peer to be finished with LCB set up\n");
9180 ssleep(5);
9181 dd_dev_err(dd, "Continuing with quick linkup\n");
9184 write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9185 set_8051_lcb_access(dd);
9188 * State "quick" LinkUp request sets the physical link state to
9189 * LinkUp without a verify capability sequence.
9190 * This state is in simulator v37 and later.
9192 ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9193 if (ret != HCMD_SUCCESS) {
9194 dd_dev_err(dd,
9195 "%s: set physical link state to quick LinkUp failed with return %d\n",
9196 __func__, ret);
9198 set_host_lcb_access(dd);
9199 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9206 return 0; /* success */
9210 * Do all special steps to set up loopback.
9212 static int init_loopback(struct hfi1_devdata *dd)
9214 dd_dev_info(dd, "Entering loopback mode\n");
9216 /* all loopbacks should disable self GUID check */
9217 write_csr(dd, DC_DC8051_CFG_MODE,
9218 (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9221 * The simulator has only one loopback option - LCB. Switch
9222 * to that option, which includes quick link up.
9224 * Accept all valid loopback values.
9226 if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9227 (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9228 loopback == LOOPBACK_CABLE)) {
9229 loopback = LOOPBACK_LCB;
9230 quick_linkup = 1;
9231 return 0;
9235 * SerDes loopback init sequence is handled in set_local_link_attributes
9237 if (loopback == LOOPBACK_SERDES)
9238 return 0;
9240 /* LCB loopback - handled at poll time */
9241 if (loopback == LOOPBACK_LCB) {
9242 quick_linkup = 1; /* LCB is always quick linkup */
9244 /* not supported in emulation due to emulation RTL changes */
9245 if (dd->icode == ICODE_FPGA_EMULATION) {
9246 dd_dev_err(dd,
9247 "LCB loopback not supported in emulation\n");
9248 return -EINVAL;
9253 /* external cable loopback requires no extra steps */
9254 if (loopback == LOOPBACK_CABLE)
9255 return 0;
9257 dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9258 return -EINVAL;
9262 * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9263 * used in the Verify Capability link width attribute.
9265 static u16 opa_to_vc_link_widths(u16 opa_widths)
9270 static const struct link_bits {
9273 } opa_link_xlate[] = {
9274 { OPA_LINK_WIDTH_1X, 1 << (1 - 1) },
9275 { OPA_LINK_WIDTH_2X, 1 << (2 - 1) },
9276 { OPA_LINK_WIDTH_3X, 1 << (3 - 1) },
9277 { OPA_LINK_WIDTH_4X, 1 << (4 - 1) },
9280 for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9281 if (opa_widths & opa_link_xlate[i].from)
9282 result |= opa_link_xlate[i].to;
9285 return result;
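/*
 * Editorial worked example (not from the original source):
 * opa_widths == (OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_4X)
 * translates to 0b1001: bit 0 for 1X and bit 3 for 4X, per the
 * table above.
 */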
9288 * Set link attributes before moving to polling.
9290 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9292 struct hfi1_devdata *dd = ppd->dd;
9294 u8 tx_polarity_inversion;
9295 u8 rx_polarity_inversion;
9298 /* reset our fabric serdes to clear any lingering problems */
9299 fabric_serdes_reset(dd);
9301 /* set the local tx rate - need to read-modify-write */
9302 ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9303 &rx_polarity_inversion, &ppd->local_tx_rate);
9304 if (ret)
9305 goto set_local_link_attributes_fail;
9307 if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9308 /* set the tx rate to the fastest enabled */
9309 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9310 ppd->local_tx_rate = 1;
9312 ppd->local_tx_rate = 0;
9314 /* set the tx rate to all enabled */
9315 ppd->local_tx_rate = 0;
9316 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9317 ppd->local_tx_rate |= 2;
9318 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9319 ppd->local_tx_rate |= 1;
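/*
 * Editorial note (not from the original source): on this newer
 * firmware the rate is a bitmask, so with both speeds enabled
 * local_tx_rate == 3 (bit 1 = 25G, bit 0 = 12.5G). The peer's
 * mask is ANDed with ours in handle_verify_cap() to pick the
 * actual rate.
 */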
9322 enable_lane_tx = 0xF; /* enable all four lanes */
9323 ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9324 rx_polarity_inversion, ppd->local_tx_rate);
9325 if (ret != HCMD_SUCCESS)
9326 goto set_local_link_attributes_fail;
9328 ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9329 if (ret != HCMD_SUCCESS) {
9330 dd_dev_err(dd,
9331 "Failed to set host interface version, return 0x%x\n",
9332 ret);
9333 goto set_local_link_attributes_fail;
9337 * DC supports continuous updates.
9339 ret = write_vc_local_phy(dd,
9340 0 /* no power management */,
9341 1 /* continuous updates */);
9342 if (ret != HCMD_SUCCESS)
9343 goto set_local_link_attributes_fail;
9345 /* z=1 in the next call: AU of 0 is not supported by the hardware */
9346 ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9347 ppd->port_crc_mode_enabled);
9348 if (ret != HCMD_SUCCESS)
9349 goto set_local_link_attributes_fail;
9352 * SerDes loopback init sequence requires
9353 * setting bit 0 of MISC_CONFIG_BITS
9355 if (loopback == LOOPBACK_SERDES)
9356 misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9359 * An external device configuration request is used to reset the LCB
9360 * to retry to obtain operational lanes when the first attempt is
9363 if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9364 misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9366 ret = write_vc_local_link_mode(dd, misc_bits, 0,
9367 opa_to_vc_link_widths(
9368 ppd->link_width_enabled));
9369 if (ret != HCMD_SUCCESS)
9370 goto set_local_link_attributes_fail;
9372 /* let peer know who we are */
9373 ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9374 if (ret == HCMD_SUCCESS)
9375 return 0;
9377 set_local_link_attributes_fail:
9378 dd_dev_err(dd,
9379 "Failed to set local link attributes, return 0x%x\n",
9380 ret);
9381 return ret;
9385 * Call this to start the link.
9386 * Do not do anything if the link is disabled.
9387 * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9389 int start_link(struct hfi1_pportdata *ppd)
9392 * Tune the SerDes to a ballpark setting for optimal signal and bit
9393 * error rate. Needs to be done before starting the link.
9395 tune_serdes(ppd);
9397 if (!ppd->driver_link_ready) {
9398 dd_dev_info(ppd->dd,
9399 "%s: stopping link start because driver is not ready\n",
9400 __func__);
9401 return 0;
9405 * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9406 * pkey table can be configured properly if the HFI unit is connected
9407 * to a switch port with MgmtAllowed=NO.
9409 clear_full_mgmt_pkey(ppd);
9411 return set_link_state(ppd, HLS_DN_POLL);
9414 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9416 struct hfi1_devdata *dd = ppd->dd;
9418 unsigned long timeout;
9421 * Some QSFP cables have a quirk that asserts the IntN line as a side
9422 * effect of power up on plug-in. We ignore this false positive
9423 * interrupt until the module has finished powering up by waiting for
9424 * a minimum timeout of the module inrush initialization time of
9425 * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9426 * module have stabilized.
9431 * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9433 timeout = jiffies + msecs_to_jiffies(2000);
9435 mask = read_csr(dd, dd->hfi1_id ?
9436 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9437 if (!(mask & QSFP_HFI0_INT_N))
9438 break;
9439 if (time_after(jiffies, timeout)) {
9440 dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9441 __func__);
9442 break;
9444 udelay(2);
9448 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9450 struct hfi1_devdata *dd = ppd->dd;
9453 mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9456 * Clear the status register to avoid an immediate interrupt
9457 * when we re-enable the IntN pin
9459 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9460 QSFP_HFI0_INT_N);
9461 mask |= (u64)QSFP_HFI0_INT_N;
9463 mask &= ~(u64)QSFP_HFI0_INT_N;
9465 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9468 int reset_qsfp(struct hfi1_pportdata *ppd)
9470 struct hfi1_devdata *dd = ppd->dd;
9471 u64 mask, qsfp_mask;
9473 /* Disable INT_N from triggering QSFP interrupts */
9474 set_qsfp_int_n(ppd, 0);
9476 /* Reset the QSFP */
9477 mask = (u64)QSFP_HFI0_RESET_N;
9479 qsfp_mask = read_csr(dd,
9480 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9481 qsfp_mask &= ~mask;
9482 write_csr(dd,
9483 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9485 udelay(10);
9487 qsfp_mask |= mask;
9488 write_csr(dd,
9489 dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9491 wait_for_qsfp_init(ppd);
9494 * Allow INT_N to trigger the QSFP interrupt to watch
9495 * for alarms and warnings
9497 set_qsfp_int_n(ppd, 1);
9500 * After the reset, AOC transmitters are enabled by default. They need
9501 * to be turned off to complete the QSFP setup before they can be
9502 * enabled again.
9504 return set_qsfp_tx(ppd, 0);
9507 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9508 u8 *qsfp_interrupt_status)
9510 struct hfi1_devdata *dd = ppd->dd;
9512 if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9513 (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9514 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9517 if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9518 (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9519 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9523 * The remaining alarms/warnings don't matter if the link is down.
9525 if (ppd->host_link_state & HLS_DOWN)
9528 if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9529 (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9530 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9533 if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9534 (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9535 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9538 /* Byte 2 is vendor specific */
9540 if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9541 (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9542 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9545 if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9546 (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9547 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9550 if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9551 (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9552 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9555 if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9556 (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9557 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9560 if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9561 (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9562 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9565 if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9566 (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9567 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9570 if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9571 (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9572 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9575 if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9576 (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9577 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9580 if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9581 (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9582 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9585 if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9586 (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9587 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9590 if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9591 (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9592 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9595 if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9596 (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9597 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9600 /* Bytes 9-10 and 11-12 are reserved */
9601 /* Bytes 13-15 are vendor specific */
9606 /* This routine is only scheduled if the QSFP module-present signal is asserted */
9607 void qsfp_event(struct work_struct *work)
9609 struct qsfp_data *qd;
9610 struct hfi1_pportdata *ppd;
9611 struct hfi1_devdata *dd;
9613 qd = container_of(work, struct qsfp_data, qsfp_work);
9618 if (!qsfp_mod_present(ppd))
9621 if (ppd->host_link_state == HLS_DN_DISABLE) {
9622 dd_dev_info(ppd->dd,
9623 "%s: stopping link start because link is disabled\n",
9629 * Turn the DC back on after the cable has been re-inserted. Up until
9630 * now, the DC has been in reset to save power.
9634 if (qd->cache_refresh_required) {
9635 set_qsfp_int_n(ppd, 0);
9637 wait_for_qsfp_init(ppd);
9640 * Allow INT_N to trigger the QSFP interrupt to watch
9641 * for alarms and warnings
9643 set_qsfp_int_n(ppd, 1);
9648 if (qd->check_interrupt_flags) {
9649 u8 qsfp_interrupt_status[16] = {0,};
9651 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9652 &qsfp_interrupt_status[0], 16) != 16) {
9654 "%s: Failed to read status of QSFP module\n",
9657 unsigned long flags;
9659 handle_qsfp_error_conditions(
9660 ppd, qsfp_interrupt_status);
9661 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9662 ppd->qsfp_info.check_interrupt_flags = 0;
9663 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9669 void init_qsfp_int(struct hfi1_devdata *dd)
9671 struct hfi1_pportdata *ppd = dd->pport;
9674 qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9675 /* Clear current status to avoid spurious interrupts */
9676 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9678 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9681 set_qsfp_int_n(ppd, 0);
9683 /* Handle active low nature of INT_N and MODPRST_N pins */
9684 if (qsfp_mod_present(ppd))
9685 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9687 dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9690 /* Enable the appropriate QSFP IRQ source */
9692 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
9694 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
9698 * Do a one-time initialization of the LCB block.
9700 static void init_lcb(struct hfi1_devdata *dd)
9702 /* simulator does not correctly handle LCB cclk loopback, skip */
9703 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9706 /* the DC has been reset earlier in the driver load */
9708 /* set LCB for cclk loopback on the port */
9709 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9710 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9711 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9712 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9713 write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9714 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9715 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9719 * Perform a test read on the QSFP. Return 0 on success, -ERRNO on error.
9722 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9728 * Report success if this is not a QSFP port or, if it is, the QSFP module is not present.
9731 if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9734 /* read byte 2, the status byte */
9735 ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9741 return 0; /* success */
9745 * Values for QSFP retry.
9747 * Give up after 10s (20 x 500ms). The overall timeout was empirically
9748 * arrived at from experience on a large cluster.
9750 #define MAX_QSFP_RETRIES 20
9751 #define QSFP_RETRY_WAIT 500 /* msec */
9754 * Try a QSFP read. If it fails, schedule a retry for later.
9755 * Called on first link activation after driver load.
9757 static void try_start_link(struct hfi1_pportdata *ppd)
9759 if (test_qsfp_read(ppd)) {
9761 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9762 dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9765 dd_dev_info(ppd->dd,
9766 "QSFP not responding, waiting and retrying %d\n",
9767 (int)ppd->qsfp_retry_count);
9768 ppd->qsfp_retry_count++;
9769 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9770 msecs_to_jiffies(QSFP_RETRY_WAIT));
9773 ppd->qsfp_retry_count = 0;
9779 * Workqueue function to start the link after a delay.
9781 void handle_start_link(struct work_struct *work)
9783 struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9784 start_link_work.work);
9785 try_start_link(ppd);
9788 int bringup_serdes(struct hfi1_pportdata *ppd)
9790 struct hfi1_devdata *dd = ppd->dd;
9794 if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9795 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9797 guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9800 guid = dd->base_guid + ppd->port - 1;
9801 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9804 /* Set linkinit_reason on power up per OPA spec */
9805 ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9807 /* one-time init of the LCB */
9811 ret = init_loopback(dd);
9817 if (ppd->port_type == PORT_TYPE_QSFP) {
9818 set_qsfp_int_n(ppd, 0);
9819 wait_for_qsfp_init(ppd);
9820 set_qsfp_int_n(ppd, 1);
9823 try_start_link(ppd);
9827 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9829 struct hfi1_devdata *dd = ppd->dd;
9832 * Shut down the link and keep it down. First clear driver_link_ready,
9833 * the flag that says the driver wants to allow the link to be up.
9834 * Then clear link_enabled so the link is not automatically
9835 * restarted. Cancel any pending restart, and finally take the link offline.
9838 ppd->driver_link_ready = 0;
9839 ppd->link_enabled = 0;
9841 ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9842 flush_delayed_work(&ppd->start_link_work);
9843 cancel_delayed_work_sync(&ppd->start_link_work);
9845 ppd->offline_disabled_reason =
9846 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9847 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9848 OPA_LINKDOWN_REASON_REBOOT);
9849 set_link_state(ppd, HLS_DN_OFFLINE);
9851 /* disable the port */
9852 clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9853 cancel_work_sync(&ppd->freeze_work);
9856 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9858 struct hfi1_pportdata *ppd;
9861 ppd = (struct hfi1_pportdata *)(dd + 1);
9862 for (i = 0; i < dd->num_pports; i++, ppd++) {
9863 ppd->ibport_data.rvp.rc_acks = NULL;
9864 ppd->ibport_data.rvp.rc_qacks = NULL;
9865 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9866 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9867 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9868 if (!ppd->ibport_data.rvp.rc_acks ||
9869 !ppd->ibport_data.rvp.rc_delayed_comp ||
9870 !ppd->ibport_data.rvp.rc_qacks)
9878 * index is the index into the receive array
9880 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9881 u32 type, unsigned long pa, u16 order)
9885 if (!(dd->flags & HFI1_PRESENT))
9888 if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9891 } else if (type > PT_INVALID) {
9893 "unexpected receive array type %u for index %u, not handled\n",
9897 trace_hfi1_put_tid(dd, index, type, pa, order);
9899 #define RT_ADDR_SHIFT 12 /* 4KB kernel address boundary */
9900 reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9901 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9902 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9903 << RCV_ARRAY_RT_ADDR_SHIFT;
9904 trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9905 writeq(reg, dd->rcvarray_wc + (index * 8));
9907 if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9909 * Eager entries are written and flushed
9911 * Expected entries are flushed every 4 writes
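/*
 * Worked example (illustrative values): an eager entry (order 0, 4KB
 * buffer) at physical address 0x12345000 encodes as
 *
 *	reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
 *	    | (u64)0 << RCV_ARRAY_RT_BUF_SIZE_SHIFT
 *	    | (0x12345000UL >> RT_ADDR_SHIFT) << RCV_ARRAY_RT_ADDR_SHIFT;
 *
 * i.e. the 4KB page number 0x12345 lands in the address field.
 */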
9918 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9920 struct hfi1_devdata *dd = rcd->dd;
9923 /* this could be optimized */
9924 for (i = rcd->eager_base; i < rcd->eager_base +
9925 rcd->egrbufs.alloced; i++)
9926 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9928 for (i = rcd->expected_base;
9929 i < rcd->expected_base + rcd->expected_count; i++)
9930 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9933 static const char * const ib_cfg_name_strings[] = {
9934 "HFI1_IB_CFG_LIDLMC",
9935 "HFI1_IB_CFG_LWID_DG_ENB",
9936 "HFI1_IB_CFG_LWID_ENB",
9938 "HFI1_IB_CFG_SPD_ENB",
9940 "HFI1_IB_CFG_RXPOL_ENB",
9941 "HFI1_IB_CFG_LREV_ENB",
9942 "HFI1_IB_CFG_LINKLATENCY",
9943 "HFI1_IB_CFG_HRTBT",
9944 "HFI1_IB_CFG_OP_VLS",
9945 "HFI1_IB_CFG_VL_HIGH_CAP",
9946 "HFI1_IB_CFG_VL_LOW_CAP",
9947 "HFI1_IB_CFG_OVERRUN_THRESH",
9948 "HFI1_IB_CFG_PHYERR_THRESH",
9949 "HFI1_IB_CFG_LINKDEFAULT",
9950 "HFI1_IB_CFG_PKEYS",
9952 "HFI1_IB_CFG_LSTATE",
9953 "HFI1_IB_CFG_VL_HIGH_LIMIT",
9954 "HFI1_IB_CFG_PMA_TICKS",
9958 static const char *ib_cfg_name(int which)
9960 if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9962 return ib_cfg_name_strings[which];
9965 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9967 struct hfi1_devdata *dd = ppd->dd;
9971 case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9972 val = ppd->link_width_enabled;
9974 case HFI1_IB_CFG_LWID: /* currently active Link-width */
9975 val = ppd->link_width_active;
9977 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9978 val = ppd->link_speed_enabled;
9980 case HFI1_IB_CFG_SPD: /* current Link speed */
9981 val = ppd->link_speed_active;
9984 case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9985 case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9986 case HFI1_IB_CFG_LINKLATENCY:
9989 case HFI1_IB_CFG_OP_VLS:
9990 val = ppd->actual_vls_operational;
9992 case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9993 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9995 case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9996 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9998 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9999 val = ppd->overrun_threshold;
10001 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10002 val = ppd->phy_error_threshold;
10004 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10008 case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10009 case HFI1_IB_CFG_PMA_TICKS:
10012 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10015 "%s: which %s: not implemented\n",
10017 ib_cfg_name(which));
10025 * The largest MAD packet size.
10027 #define MAX_MAD_PACKET 2048
10030 * Return the maximum header bytes that can go on the _wire_
10031 * for this device. This count includes the ICRC which is
10032 * not part of the packet held in memory but is appended by the HW.
10034 * This is dependent on the device's receive header entry size.
10035 * HFI allows this to be set per-receive context, but the
10036 * driver presently enforces a global value.
10038 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10041 * The maximum non-payload (MTU) bytes in LRH.PktLen are
10042 * the Receive Header Entry Size minus the PBC (or RHF) size
10043 * plus one DW for the ICRC appended by HW.
10045 * dd->rcd[0].rcvhdrqentsize is in DW.
10046 * We use rcd[0] as all context will have the same value. Also,
10047 * the first kernel context would have been allocated by now so
10048 * we are guaranteed a valid value.
10050 return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
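/*
 * Worked example (illustrative): with rcvhdrqentsize = 32 DW, the
 * maximum wire header is (32 - 2 + 1) << 2 = 124 bytes: the entry size
 * minus the 2-DW PBC/RHF, plus the 1-DW ICRC, converted to bytes.
 */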
10055 * @ppd: per port data
10057 * Set the MTU by limiting how many DWs may be sent. The SendLenCheck*
10058 * registers compare against LRH.PktLen, so use the max bytes included
10061 * This routine changes all VL values except VL15, which it maintains at the same value.
10064 static void set_send_length(struct hfi1_pportdata *ppd)
10066 struct hfi1_devdata *dd = ppd->dd;
10067 u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10068 u32 maxvlmtu = dd->vld[15].mtu;
10069 u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10070 & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10071 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10075 for (i = 0; i < ppd->vls_supported; i++) {
10076 if (dd->vld[i].mtu > maxvlmtu)
10077 maxvlmtu = dd->vld[i].mtu;
10079 len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10080 & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10081 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10083 len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10084 & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10085 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10087 write_csr(dd, SEND_LEN_CHECK0, len1);
10088 write_csr(dd, SEND_LEN_CHECK1, len2);
10089 /* adjust kernel credit return thresholds based on new MTUs */
10090 /* all kernel receive contexts have the same hdrqentsize */
10091 for (i = 0; i < ppd->vls_supported; i++) {
10092 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10093 sc_mtu_to_threshold(dd->vld[i].sc,
10095 dd->rcd[0]->rcvhdrqentsize));
10096 for (j = 0; j < INIT_SC_PER_VL; j++)
10097 sc_set_cr_threshold(
10098 pio_select_send_context_vl(dd, j, i),
10101 thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10102 sc_mtu_to_threshold(dd->vld[15].sc,
10104 dd->rcd[0]->rcvhdrqentsize));
10105 sc_set_cr_threshold(dd->vld[15].sc, thres);
10107 /* Adjust maximum MTU for the port in DC */
10108 dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10109 (ilog2(maxvlmtu >> 8) + 1);
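	/*
	 * For illustration: maxvlmtu 2048 -> ilog2(8) + 1 = 4, 4096 -> 5,
	 * 8192 -> 6; only 10240 uses the dedicated CAP_10240 encoding.
	 */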
10110 len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10111 len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10112 len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10113 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10114 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10117 static void set_lidlmc(struct hfi1_pportdata *ppd)
10121 struct hfi1_devdata *dd = ppd->dd;
10122 u32 mask = ~((1U << ppd->lmc) - 1);
10123 u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10127 * Program 0 in the CSR if the port LID is extended. This prevents
10128 * 9B packets from being sent out for large LIDs.
10130 lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10131 c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10132 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10133 c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10134 << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10135 ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10136 << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10137 write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10140 * Iterate over all the send contexts and set their SLID check
10142 sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10143 SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10144 (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10145 SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10147 for (i = 0; i < chip_send_contexts(dd); i++) {
10148 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10150 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10153 /* Now we have to do the same thing for the sdma engines */
10154 sdma_update_lmc(dd, mask, lid);
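/*
 * LMC example (illustrative): with lmc = 2 the mask is ~0x3, so a port
 * LID of 0x20 programs an SLID check that passes source LIDs 0x20-0x23,
 * the low two (LMC) bits being ignored.
 */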
10157 static const char *state_completed_string(u32 completed)
10159 static const char * const state_completed[] = {
10165 if (completed < ARRAY_SIZE(state_completed))
10166 return state_completed[completed];
10171 static const char all_lanes_dead_timeout_expired[] =
10172 "All lanes were inactive – was the interconnect media removed?";
10173 static const char tx_out_of_policy[] =
10174 "Passing lanes on local port do not meet the local link width policy";
10175 static const char no_state_complete[] =
10176 "State timeout occurred before link partner completed the state";
10177 static const char * const state_complete_reasons[] = {
10178 [0x00] = "Reason unknown",
10179 [0x01] = "Link was halted by driver, refer to LinkDownReason",
10180 [0x02] = "Link partner reported failure",
10181 [0x10] = "Unable to achieve frame sync on any lane",
10183 "Unable to find a common bit rate with the link partner",
10185 "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10187 "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10188 [0x14] = no_state_complete,
10190 "State timeout occurred before link partner identified equalization presets",
10192 "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10193 [0x17] = tx_out_of_policy,
10194 [0x20] = all_lanes_dead_timeout_expired,
10196 "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10197 [0x22] = no_state_complete,
10199 "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10200 [0x24] = tx_out_of_policy,
10201 [0x30] = all_lanes_dead_timeout_expired,
10203 "State timeout occurred waiting for host to process received frames",
10204 [0x32] = no_state_complete,
10206 "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10207 [0x34] = tx_out_of_policy,
10208 [0x35] = "Negotiated link width is mutually exclusive",
10210 "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10211 [0x37] = "Unable to resolve secure data exchange",
10214 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10217 const char *str = NULL;
10219 if (code < ARRAY_SIZE(state_complete_reasons))
10220 str = state_complete_reasons[code];
10227 /* describe the given last state complete frame */
10228 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10229 const char *prefix)
10231 struct hfi1_devdata *dd = ppd->dd;
10239 * [ 0: 0] - success
10240 * [ 3: 1] - state
10241 * [ 7: 4] - next state timeout
10242 * [15: 8] - reason code
10243 * [31:16] - lanes
10245 success = frame & 0x1;
10246 state = (frame >> 1) & 0x7;
10247 reason = (frame >> 8) & 0xff;
10248 lanes = (frame >> 16) & 0xffff;
10250 dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10252 dd_dev_err(dd, " last reported state: %s (0x%x)\n",
10253 state_completed_string(state), state);
10254 dd_dev_err(dd, " state successfully completed: %s\n",
10255 success ? "yes" : "no");
10256 dd_dev_err(dd, " fail reason 0x%x: %s\n",
10257 reason, state_complete_reason_code_string(ppd, reason));
10258 dd_dev_err(dd, " passing lane mask: 0x%x", lanes);
10262 * Read the last state complete frames and explain them. This routine
10263 * expects to be called if the link went down during link negotiation
10264 * and initialization (LNI). That is, anywhere between polling and link up.
10266 static void check_lni_states(struct hfi1_pportdata *ppd)
10268 u32 last_local_state;
10269 u32 last_remote_state;
10271 read_last_local_state(ppd->dd, &last_local_state);
10272 read_last_remote_state(ppd->dd, &last_remote_state);
10275 * Don't report anything if there is nothing to report. A value of
10276 * 0 means the link was taken down while polling and there was no
10277 * training in progress.
10279 if (last_local_state == 0 && last_remote_state == 0)
10282 decode_state_complete(ppd, last_local_state, "transmitted");
10283 decode_state_complete(ppd, last_remote_state, "received");
10286 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10287 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10290 unsigned long timeout;
10292 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10293 timeout = jiffies + msecs_to_jiffies(wait_ms);
10295 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10298 if (time_after(jiffies, timeout)) {
10300 "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10308 /* called when the logical link state is not down as it should be */
10309 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10311 struct hfi1_devdata *dd = ppd->dd;
10314 * Bring link up in LCB loopback
10316 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10317 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10318 DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10320 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10321 write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10322 write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10323 write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10325 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10326 (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10328 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10329 write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10331 wait_link_transfer_active(dd, 100);
10334 * Bring the link down again.
10336 write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10337 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10338 write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10340 dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10344 * Helper for set_link_state(). Do not call except from that routine.
10345 * Expects ppd->hls_lock to be held.
10347 * @rem_reason value to be sent to the neighbor
10349 * LinkDownReasons only set if transition succeeds.
10351 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10353 struct hfi1_devdata *dd = ppd->dd;
10354 u32 previous_state;
10355 int offline_state_ret;
10358 update_lcb_cache(dd);
10360 previous_state = ppd->host_link_state;
10361 ppd->host_link_state = HLS_GOING_OFFLINE;
10363 /* start offline transition */
10364 ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10366 if (ret != HCMD_SUCCESS) {
10368 "Failed to transition to Offline link state, return %d\n",
10372 if (ppd->offline_disabled_reason ==
10373 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10374 ppd->offline_disabled_reason =
10375 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10377 offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10378 if (offline_state_ret < 0)
10379 return offline_state_ret;
10381 /* Disabling AOC transmitters */
10382 if (ppd->port_type == PORT_TYPE_QSFP &&
10383 ppd->qsfp_info.limiting_active &&
10384 qsfp_mod_present(ppd)) {
10387 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10389 set_qsfp_tx(ppd, 0);
10390 release_chip_resource(dd, qsfp_resource(dd));
10392 /* not fatal, but should warn */
10394 "Unable to acquire lock to turn off QSFP TX\n");
10399 * Wait for the offline.Quiet transition if it hasn't happened yet. It
10400 * can take a while for the link to go down.
10402 if (offline_state_ret != PLS_OFFLINE_QUIET) {
10403 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10409 * Now in charge of LCB - must be after the physical state is
10410 * offline.quiet and before host_link_state is changed.
10412 set_host_lcb_access(dd);
10413 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10415 /* make sure the logical state is also down */
10416 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10418 force_logical_link_state_down(ppd);
10420 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10421 update_statusp(ppd, IB_PORT_DOWN);
10424 * The LNI has a mandatory wait time after the physical state
10425 * moves to Offline.Quiet. The wait time may be different
10426 * depending on how the link went down. The 8051 firmware
10427 * will observe the needed wait time and only move to ready
10428 * when that is completed. The largest of the quiet timeouts
10429 * is 6s, so wait that long and then at least 0.5s more for
10430 * other transitions, and another 0.5s for a buffer.
10432 ret = wait_fm_ready(dd, 7000);
10435 "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10436 /* state is really offline, so make it so */
10437 ppd->host_link_state = HLS_DN_OFFLINE;
10442 * The state is now offline and the 8051 is ready to accept host requests:
10444 * - change our state
10445 * - notify others if we were previously in a linkup state
10447 ppd->host_link_state = HLS_DN_OFFLINE;
10448 if (previous_state & HLS_UP) {
10449 /* went down while link was up */
10450 handle_linkup_change(dd, 0);
10451 } else if (previous_state
10452 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10453 /* went down while attempting link up */
10454 check_lni_states(ppd);
10456 /* The QSFP doesn't need to be reset on LNI failure */
10457 ppd->qsfp_info.reset_needed = 0;
10460 /* the active link width (downgrade) is 0 on link down */
10461 ppd->link_width_active = 0;
10462 ppd->link_width_downgrade_tx_active = 0;
10463 ppd->link_width_downgrade_rx_active = 0;
10464 ppd->current_egress_rate = 0;
10468 /* return the link state name */
10469 static const char *link_state_name(u32 state)
10472 int n = ilog2(state);
10473 static const char * const names[] = {
10474 [__HLS_UP_INIT_BP] = "INIT",
10475 [__HLS_UP_ARMED_BP] = "ARMED",
10476 [__HLS_UP_ACTIVE_BP] = "ACTIVE",
10477 [__HLS_DN_DOWNDEF_BP] = "DOWNDEF",
10478 [__HLS_DN_POLL_BP] = "POLL",
10479 [__HLS_DN_DISABLE_BP] = "DISABLE",
10480 [__HLS_DN_OFFLINE_BP] = "OFFLINE",
10481 [__HLS_VERIFY_CAP_BP] = "VERIFY_CAP",
10482 [__HLS_GOING_UP_BP] = "GOING_UP",
10483 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10484 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10487 name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10488 return name ? name : "unknown";
10491 /* return the link state reason name */
10492 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10494 if (state == HLS_UP_INIT) {
10495 switch (ppd->linkinit_reason) {
10496 case OPA_LINKINIT_REASON_LINKUP:
10498 case OPA_LINKINIT_REASON_FLAPPING:
10499 return "(FLAPPING)";
10500 case OPA_LINKINIT_OUTSIDE_POLICY:
10501 return "(OUTSIDE_POLICY)";
10502 case OPA_LINKINIT_QUARANTINED:
10503 return "(QUARANTINED)";
10504 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10505 return "(INSUFIC_CAPABILITY)";
10514 * driver_pstate - convert the driver's notion of a port's
10515 * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10516 * Return -1 (converted to a u32) to indicate error.
10518 u32 driver_pstate(struct hfi1_pportdata *ppd)
10520 switch (ppd->host_link_state) {
10523 case HLS_UP_ACTIVE:
10524 return IB_PORTPHYSSTATE_LINKUP;
10526 return IB_PORTPHYSSTATE_POLLING;
10527 case HLS_DN_DISABLE:
10528 return IB_PORTPHYSSTATE_DISABLED;
10529 case HLS_DN_OFFLINE:
10530 return OPA_PORTPHYSSTATE_OFFLINE;
10531 case HLS_VERIFY_CAP:
10532 return IB_PORTPHYSSTATE_TRAINING;
10534 return IB_PORTPHYSSTATE_TRAINING;
10535 case HLS_GOING_OFFLINE:
10536 return OPA_PORTPHYSSTATE_OFFLINE;
10537 case HLS_LINK_COOLDOWN:
10538 return OPA_PORTPHYSSTATE_OFFLINE;
10539 case HLS_DN_DOWNDEF:
10541 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10542 ppd->host_link_state);
10548 * driver_lstate - convert the driver's notion of a port's
10549 * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10550 * (converted to a u32) to indicate error.
10552 u32 driver_lstate(struct hfi1_pportdata *ppd)
10554 if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10555 return IB_PORT_DOWN;
10557 switch (ppd->host_link_state & HLS_UP) {
10559 return IB_PORT_INIT;
10561 return IB_PORT_ARMED;
10562 case HLS_UP_ACTIVE:
10563 return IB_PORT_ACTIVE;
10565 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10566 ppd->host_link_state);
10571 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10572 u8 neigh_reason, u8 rem_reason)
10574 if (ppd->local_link_down_reason.latest == 0 &&
10575 ppd->neigh_link_down_reason.latest == 0) {
10576 ppd->local_link_down_reason.latest = lcl_reason;
10577 ppd->neigh_link_down_reason.latest = neigh_reason;
10578 ppd->remote_link_down_reason = rem_reason;
10583 * data_vls_operational() - Verify that data VL BCT credits and MTU are both set.
10585 * @ppd: pointer to hfi1_pportdata structure
10587 * Return: true if OK, false otherwise.
10589 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10594 if (!ppd->actual_vls_operational)
10597 for (i = 0; i < ppd->vls_supported; i++) {
10598 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
10599 if ((reg && !ppd->dd->vld[i].mtu) ||
10600 (!reg && ppd->dd->vld[i].mtu))
10608 * Change the physical and/or logical link state.
10610 * Do not call this routine while inside an interrupt. It contains
10611 * calls to routines that can take multiple seconds to finish.
10613 * Returns 0 on success, -errno on failure.
10615 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10617 struct hfi1_devdata *dd = ppd->dd;
10618 struct ib_event event = {.device = NULL};
10620 int orig_new_state, poll_bounce;
10622 mutex_lock(&ppd->hls_lock);
10624 orig_new_state = state;
10625 if (state == HLS_DN_DOWNDEF)
10626 state = HLS_DEFAULT;
10628 /* interpret poll -> poll as a link bounce */
10629 poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10630 state == HLS_DN_POLL;
10632 dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10633 link_state_name(ppd->host_link_state),
10634 link_state_name(orig_new_state),
10635 poll_bounce ? "(bounce) " : "",
10636 link_state_reason_name(ppd, state));
10639 * If we're going to a (HLS_*) link state that implies the logical
10640 * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10641 * reset is_sm_config_started to 0.
10643 if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10644 ppd->is_sm_config_started = 0;
10647 * Do nothing if the states match. Let a poll to poll link bounce go through.
10650 if (ppd->host_link_state == state && !poll_bounce)
10655 if (ppd->host_link_state == HLS_DN_POLL &&
10656 (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10658 * Quick link up jumps from polling to here.
10660 * Whether in normal or loopback mode, the
10661 * simulator jumps from polling to link up.
10662 * Accept that here.
10665 } else if (ppd->host_link_state != HLS_GOING_UP) {
10670 * Wait for Link_Up physical state.
10671 * Physical and Logical states should already have
10672 * transitioned to LinkUp and LinkInit respectively.
10674 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10677 "%s: physical state did not change to LINK-UP\n",
10682 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10685 "%s: logical state did not change to INIT\n",
10690 /* clear old transient LINKINIT_REASON code */
10691 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10692 ppd->linkinit_reason =
10693 OPA_LINKINIT_REASON_LINKUP;
10695 /* enable the port */
10696 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10698 handle_linkup_change(dd, 1);
10699 pio_kernel_linkup(dd);
10702 * After link up, a new link width will have been set.
10703 * Update the xmit counters with regard to the new link width.
10706 update_xmit_counters(ppd, ppd->link_width_active);
10708 ppd->host_link_state = HLS_UP_INIT;
10709 update_statusp(ppd, IB_PORT_INIT);
10712 if (ppd->host_link_state != HLS_UP_INIT)
10715 if (!data_vls_operational(ppd)) {
10717 "%s: Invalid data VL credits or mtu\n",
10723 set_logical_state(dd, LSTATE_ARMED);
10724 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10727 "%s: logical state did not change to ARMED\n",
10731 ppd->host_link_state = HLS_UP_ARMED;
10732 update_statusp(ppd, IB_PORT_ARMED);
10734 * The simulator does not currently implement SMA messages,
10735 * so neighbor_normal is not set. Set it here when we first move to Armed.
10738 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10739 ppd->neighbor_normal = 1;
10741 case HLS_UP_ACTIVE:
10742 if (ppd->host_link_state != HLS_UP_ARMED)
10745 set_logical_state(dd, LSTATE_ACTIVE);
10746 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10749 "%s: logical state did not change to ACTIVE\n",
10752 /* tell all engines to go running */
10753 sdma_all_running(dd);
10754 ppd->host_link_state = HLS_UP_ACTIVE;
10755 update_statusp(ppd, IB_PORT_ACTIVE);
10757 /* Signal the IB layer that the port has gone active */
10758 event.device = &dd->verbs_dev.rdi.ibdev;
10759 event.element.port_num = ppd->port;
10760 event.event = IB_EVENT_PORT_ACTIVE;
10764 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10765 ppd->host_link_state == HLS_DN_OFFLINE) &&
10768 /* Hand LED control to the DC */
10769 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10771 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10772 u8 tmp = ppd->link_enabled;
10774 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10776 ppd->link_enabled = tmp;
10779 ppd->remote_link_down_reason = 0;
10781 if (ppd->driver_link_ready)
10782 ppd->link_enabled = 1;
10785 set_all_slowpath(ppd->dd);
10786 ret = set_local_link_attributes(ppd);
10790 ppd->port_error_action = 0;
10792 if (quick_linkup) {
10793 /* quick linkup does not go into polling */
10794 ret = do_quick_linkup(dd);
10796 ret1 = set_physical_link_state(dd, PLS_POLLING);
10798 ret1 = wait_phys_link_out_of_offline(ppd,
10800 if (ret1 != HCMD_SUCCESS) {
10802 "Failed to transition to Polling link state, return 0x%x\n",
10809 * Change the host link state after requesting DC8051 to
10810 * change its physical state so that we can ignore any
10811 * interrupt with stale LNI(XX) error, which will not be
10812 * cleared until DC8051 transitions to Polling state.
10814 ppd->host_link_state = HLS_DN_POLL;
10815 ppd->offline_disabled_reason =
10816 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10818 * If an error occurred above, go back to offline. The
10819 * caller may reschedule another attempt.
10822 goto_offline(ppd, 0);
10824 log_physical_state(ppd, PLS_POLLING);
10826 case HLS_DN_DISABLE:
10827 /* link is disabled */
10828 ppd->link_enabled = 0;
10830 /* allow any state to transition to disabled */
10832 /* must transition to offline first */
10833 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10834 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10837 ppd->remote_link_down_reason = 0;
10840 if (!dd->dc_shutdown) {
10841 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10842 if (ret1 != HCMD_SUCCESS) {
10844 "Failed to transition to Disabled link state, return 0x%x\n",
10849 ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10852 "%s: physical state did not change to DISABLED\n",
10858 ppd->host_link_state = HLS_DN_DISABLE;
10860 case HLS_DN_OFFLINE:
10861 if (ppd->host_link_state == HLS_DN_DISABLE)
10864 /* allow any state to transition to offline */
10865 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10867 ppd->remote_link_down_reason = 0;
10869 case HLS_VERIFY_CAP:
10870 if (ppd->host_link_state != HLS_DN_POLL)
10872 ppd->host_link_state = HLS_VERIFY_CAP;
10873 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10876 if (ppd->host_link_state != HLS_VERIFY_CAP)
10879 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10880 if (ret1 != HCMD_SUCCESS) {
10882 "Failed to transition to link up state, return 0x%x\n",
10887 ppd->host_link_state = HLS_GOING_UP;
10890 case HLS_GOING_OFFLINE: /* transient within goto_offline() */
10891 case HLS_LINK_COOLDOWN: /* transient within goto_offline() */
10893 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10902 dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10903 __func__, link_state_name(ppd->host_link_state),
10904 link_state_name(state));
10908 mutex_unlock(&ppd->hls_lock);
10911 ib_dispatch_event(&event);
10916 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10922 case HFI1_IB_CFG_LIDLMC:
10925 case HFI1_IB_CFG_VL_HIGH_LIMIT:
10927 * The VL Arbitrator high limit is sent in units of 4k
10928 * bytes, while HFI stores it in units of 64 bytes.
10931 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10932 << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10933 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10935 case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10936 /* HFI only supports POLL as the default link down state */
10937 if (val != HLS_DN_POLL)
10940 case HFI1_IB_CFG_OP_VLS:
10941 if (ppd->vls_operational != val) {
10942 ppd->vls_operational = val;
10948 * For link width, link width downgrade, and speed enable, always AND
10949 * the setting with what is actually supported. This has two benefits.
10950 * First, enabled can't have unsupported values, no matter what the
10951 * SM or FM might want. Second, the ALL_SUPPORTED wildcards that mean
10952 * "fill in with your supported value" have all the bits in the
10953 * field set, so simply ANDing with supported has the desired result.
10955 case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10956 ppd->link_width_enabled = val & ppd->link_width_supported;
10958 case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10959 ppd->link_width_downgrade_enabled =
10960 val & ppd->link_width_downgrade_supported;
10962 case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10963 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10965 case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10967 * HFI does not follow IB specs, save this value
10968 * so we can report it, if asked.
10970 ppd->overrun_threshold = val;
10972 case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10974 * HFI does not follow IB specs, save this value
10975 * so we can report it, if asked.
10977 ppd->phy_error_threshold = val;
10980 case HFI1_IB_CFG_MTU:
10981 set_send_length(ppd);
10984 case HFI1_IB_CFG_PKEYS:
10985 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10986 set_partition_keys(ppd);
10990 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10991 dd_dev_info(ppd->dd,
10992 "%s: which %s, val 0x%x: not implemented\n",
10993 __func__, ib_cfg_name(which), val);
10999 /* begin functions related to vl arbitration table caching */
11000 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
11004 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11005 VL_ARB_LOW_PRIO_TABLE_SIZE);
11006 BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11007 VL_ARB_HIGH_PRIO_TABLE_SIZE);
11010 * Note that we always return values directly from the
11011 * 'vl_arb_cache' (and do no CSR reads) in response to a
11012 * 'Get(VLArbTable)'. This is obviously correct after a
11013 * 'Set(VLArbTable)', since the cache will then be up to
11014 * date. But it's also correct prior to any 'Set(VLArbTable)'
11015 * since then both the cache and the relevant h/w registers will be 0.
11019 for (i = 0; i < MAX_PRIO_TABLE; i++)
11020 spin_lock_init(&ppd->vl_arb_cache[i].lock);
11024 * vl_arb_lock_cache
11026 * All other vl_arb_* functions should be called only after locking the cache.
11029 static inline struct vl_arb_cache *
11030 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11032 if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11034 spin_lock(&ppd->vl_arb_cache[idx].lock);
11035 return &ppd->vl_arb_cache[idx];
11038 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11040 spin_unlock(&ppd->vl_arb_cache[idx].lock);
11043 static void vl_arb_get_cache(struct vl_arb_cache *cache,
11044 struct ib_vl_weight_elem *vl)
11046 memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11049 static void vl_arb_set_cache(struct vl_arb_cache *cache,
11050 struct ib_vl_weight_elem *vl)
11052 memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11055 static int vl_arb_match_cache(struct vl_arb_cache *cache,
11056 struct ib_vl_weight_elem *vl)
11058 return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11061 /* end functions related to vl arbitration table caching */
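/*
 * Typical use of the cache helpers above (see fm_get_table() and
 * fm_set_table() below for the real callers):
 *
 *	struct vl_arb_cache *vlc;
 *
 *	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, t);
 *	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
 */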
11063 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11064 u32 size, struct ib_vl_weight_elem *vl)
11066 struct hfi1_devdata *dd = ppd->dd;
11068 unsigned int i, is_up = 0;
11069 int drain, ret = 0;
11071 mutex_lock(&ppd->hls_lock);
11073 if (ppd->host_link_state & HLS_UP)
11076 drain = !is_ax(dd) && is_up;
11080 * Before adjusting VL arbitration weights, empty per-VL
11081 * FIFOs, otherwise a packet whose VL weight is being
11082 * set to 0 could get stuck in a FIFO with no chance to egress.
11085 ret = stop_drain_data_vls(dd);
11090 "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11095 for (i = 0; i < size; i++, vl++) {
11097 * NOTE: The low priority shift and mask are used here, but
11098 * they are the same for both the low and high registers.
11100 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11101 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11102 | (((u64)vl->weight
11103 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11104 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11105 write_csr(dd, target + (i * 8), reg);
11107 pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11110 open_fill_data_vls(dd); /* reopen all VLs */
11113 mutex_unlock(&ppd->hls_lock);
11119 * Read one credit merge VL register.
11121 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11122 struct vl_limit *vll)
11124 u64 reg = read_csr(dd, csr);
11126 vll->dedicated = cpu_to_be16(
11127 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11128 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11129 vll->shared = cpu_to_be16(
11130 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11131 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11135 * Read the current credit merge limits.
11137 static int get_buffer_control(struct hfi1_devdata *dd,
11138 struct buffer_control *bc, u16 *overall_limit)
11143 /* not all entries are filled in */
11144 memset(bc, 0, sizeof(*bc));
11146 /* OPA and HFI have a 1-1 mapping */
11147 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11148 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11150 /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11151 read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11153 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11154 bc->overall_shared_limit = cpu_to_be16(
11155 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11156 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11158 *overall_limit = (reg
11159 >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11160 & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11161 return sizeof(struct buffer_control);
11164 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11169 /* each register contains 16 SC->VLnt mappings, 4 bits each */
11170 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11171 for (i = 0; i < sizeof(u64); i++) {
11172 u8 byte = *(((u8 *)&reg) + i);
11174 dp->vlnt[2 * i] = byte & 0xf;
11175 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11178 reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11179 for (i = 0; i < sizeof(u64); i++) {
11180 u8 byte = *(((u8 *)&reg) + i);
11182 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11183 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11185 return sizeof(struct sc2vlnt);
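/*
 * Nibble-unpacking example (illustrative): if the low byte of
 * DCC_CFG_SC_VL_TABLE_15_0 reads 0x3A, then vlnt[0] = 0xA (low nibble)
 * and vlnt[1] = 0x3 (high nibble).
 */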
11188 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11189 struct ib_vl_weight_elem *vl)
11193 for (i = 0; i < nelems; i++, vl++) {
11199 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11201 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11203 0, dp->vlnt[0] & 0xf,
11204 1, dp->vlnt[1] & 0xf,
11205 2, dp->vlnt[2] & 0xf,
11206 3, dp->vlnt[3] & 0xf,
11207 4, dp->vlnt[4] & 0xf,
11208 5, dp->vlnt[5] & 0xf,
11209 6, dp->vlnt[6] & 0xf,
11210 7, dp->vlnt[7] & 0xf,
11211 8, dp->vlnt[8] & 0xf,
11212 9, dp->vlnt[9] & 0xf,
11213 10, dp->vlnt[10] & 0xf,
11214 11, dp->vlnt[11] & 0xf,
11215 12, dp->vlnt[12] & 0xf,
11216 13, dp->vlnt[13] & 0xf,
11217 14, dp->vlnt[14] & 0xf,
11218 15, dp->vlnt[15] & 0xf));
11219 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11220 DC_SC_VL_VAL(31_16,
11221 16, dp->vlnt[16] & 0xf,
11222 17, dp->vlnt[17] & 0xf,
11223 18, dp->vlnt[18] & 0xf,
11224 19, dp->vlnt[19] & 0xf,
11225 20, dp->vlnt[20] & 0xf,
11226 21, dp->vlnt[21] & 0xf,
11227 22, dp->vlnt[22] & 0xf,
11228 23, dp->vlnt[23] & 0xf,
11229 24, dp->vlnt[24] & 0xf,
11230 25, dp->vlnt[25] & 0xf,
11231 26, dp->vlnt[26] & 0xf,
11232 27, dp->vlnt[27] & 0xf,
11233 28, dp->vlnt[28] & 0xf,
11234 29, dp->vlnt[29] & 0xf,
11235 30, dp->vlnt[30] & 0xf,
11236 31, dp->vlnt[31] & 0xf));
11239 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11243 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11244 what, (int)limit, idx);
11247 /* change only the shared limit portion of SendCmGlobalCredit */
11248 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11252 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11253 reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11254 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11255 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11258 /* change only the total credit limit portion of SendCmGlobalCredit */
11259 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11263 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11264 reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11265 reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11266 write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11269 /* set the given per-VL shared limit */
11270 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11275 if (vl < TXE_NUM_DATA_VL)
11276 addr = SEND_CM_CREDIT_VL + (8 * vl);
11278 addr = SEND_CM_CREDIT_VL15;
11280 reg = read_csr(dd, addr);
11281 reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11282 reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11283 write_csr(dd, addr, reg);
11286 /* set the given per-VL dedicated limit */
11287 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11292 if (vl < TXE_NUM_DATA_VL)
11293 addr = SEND_CM_CREDIT_VL + (8 * vl);
11295 addr = SEND_CM_CREDIT_VL15;
11297 reg = read_csr(dd, addr);
11298 reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11299 reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11300 write_csr(dd, addr, reg);
11303 /* spin until the given per-VL status mask bits clear */
11304 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11307 unsigned long timeout;
11310 timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11312 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11315 return; /* success */
11316 if (time_after(jiffies, timeout))
11317 break; /* timed out */
11322 "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11323 which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11325 * If this occurs, it is likely there was a credit loss on the link.
11326 * The only recovery from that is a link bounce.
11329 "Continuing anyway. A credit loss may occur. Suggest a link bounce\n");
11333 * The number of credits on the VLs may be changed while everything
11334 * is "live", but the following algorithm must be followed due to
11335 * how the hardware is actually implemented. In particular,
11336 * Return_Credit_Status[] is the only correct status check.
11338 * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11339 * set Global_Shared_Credit_Limit = 0
11341 * mask0 = all VLs that are changing either dedicated or shared limits
11342 * set Shared_Limit[mask0] = 0
11343 * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11344 * if (changing any dedicated limit)
11345 * mask1 = all VLs that are lowering dedicated limits
11346 * lower Dedicated_Limit[mask1]
11347 * spin until Return_Credit_Status[mask1] == 0
11348 * raise Dedicated_Limits
11349 * raise Shared_Limits
11350 * raise Global_Shared_Credit_Limit
11352 * lower = if the new limit is lower, set the limit to the new value
11353 * raise = if the new limit is higher than the current value (may be changed
11354 * earlier in the algorithm), set the new limit to the new value
11356 int set_buffer_control(struct hfi1_pportdata *ppd,
11357 struct buffer_control *new_bc)
11359 struct hfi1_devdata *dd = ppd->dd;
11360 u64 changing_mask, ld_mask, stat_mask;
11362 int i, use_all_mask;
11363 int this_shared_changing;
11364 int vl_count = 0, ret;
11366 * A0: add the variable any_shared_limit_changing below and in the
11367 * algorithm above. If removing A0 support, it can be removed.
11369 int any_shared_limit_changing;
11370 struct buffer_control cur_bc;
11371 u8 changing[OPA_MAX_VLS];
11372 u8 lowering_dedicated[OPA_MAX_VLS];
11375 const u64 all_mask =
11376 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11377 | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11378 | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11379 | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11380 | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11381 | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11382 | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11383 | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11384 | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11386 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11387 #define NUM_USABLE_VLS 16 /* look at VL15 and less */
11389 /* find the new total credits, do a sanity check on unused VLs */
11390 for (i = 0; i < OPA_MAX_VLS; i++) {
11392 new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11395 nonzero_msg(dd, i, "dedicated",
11396 be16_to_cpu(new_bc->vl[i].dedicated));
11397 nonzero_msg(dd, i, "shared",
11398 be16_to_cpu(new_bc->vl[i].shared));
11399 new_bc->vl[i].dedicated = 0;
11400 new_bc->vl[i].shared = 0;
11402 new_total += be16_to_cpu(new_bc->overall_shared_limit);
11404 /* fetch the current values */
11405 get_buffer_control(dd, &cur_bc, &cur_total);
11408 * Create the masks we will use.
11410 memset(changing, 0, sizeof(changing));
11411 memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11413 * NOTE: Assumes that the individual VL bits are adjacent and in increasing order.
11417 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11421 any_shared_limit_changing = 0;
11422 for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11425 this_shared_changing = new_bc->vl[i].shared
11426 != cur_bc.vl[i].shared;
11427 if (this_shared_changing)
11428 any_shared_limit_changing = 1;
11429 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11430 this_shared_changing) {
11432 changing_mask |= stat_mask;
11435 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11436 be16_to_cpu(cur_bc.vl[i].dedicated)) {
11437 lowering_dedicated[i] = 1;
11438 ld_mask |= stat_mask;
11442 /* bracket the credit change with a total adjustment */
11443 if (new_total > cur_total)
11444 set_global_limit(dd, new_total);
11447 * Start the credit change algorithm.
11450 if ((be16_to_cpu(new_bc->overall_shared_limit) <
11451 be16_to_cpu(cur_bc.overall_shared_limit)) ||
11452 (is_ax(dd) && any_shared_limit_changing)) {
11453 set_global_shared(dd, 0);
11454 cur_bc.overall_shared_limit = 0;
11458 for (i = 0; i < NUM_USABLE_VLS; i++) {
11463 set_vl_shared(dd, i, 0);
11464 cur_bc.vl[i].shared = 0;
11468 wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11471 if (change_count > 0) {
11472 for (i = 0; i < NUM_USABLE_VLS; i++) {
11476 if (lowering_dedicated[i]) {
11477 set_vl_dedicated(dd, i,
11478 be16_to_cpu(new_bc->
11480 cur_bc.vl[i].dedicated =
11481 new_bc->vl[i].dedicated;
11485 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11487 /* now raise all dedicated that are going up */
11488 for (i = 0; i < NUM_USABLE_VLS; i++) {
11492 if (be16_to_cpu(new_bc->vl[i].dedicated) >
11493 be16_to_cpu(cur_bc.vl[i].dedicated))
11494 set_vl_dedicated(dd, i,
11495 be16_to_cpu(new_bc->
11500 /* next raise all shared that are going up */
11501 for (i = 0; i < NUM_USABLE_VLS; i++) {
11505 if (be16_to_cpu(new_bc->vl[i].shared) >
11506 be16_to_cpu(cur_bc.vl[i].shared))
11507 set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11510 /* finally raise the global shared */
11511 if (be16_to_cpu(new_bc->overall_shared_limit) >
11512 be16_to_cpu(cur_bc.overall_shared_limit))
11513 set_global_shared(dd,
11514 be16_to_cpu(new_bc->overall_shared_limit));
11516 /* bracket the credit change with a total adjustment */
11517 if (new_total < cur_total)
11518 set_global_limit(dd, new_total);
11521 * Determine the actual number of operational VLs using the number of
11522 * dedicated and shared credits for each VL.
11524 if (change_count > 0) {
11525 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11526 if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11527 be16_to_cpu(new_bc->vl[i].shared) > 0)
11529 ppd->actual_vls_operational = vl_count;
11530 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11531 ppd->actual_vls_operational :
11532 ppd->vls_operational,
11535 ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11536 ppd->actual_vls_operational :
11537 ppd->vls_operational, NULL);
11545 * Read the given fabric manager table. Return the size of the
11546 * table (in bytes) on success, and a negative error code on failure.
11549 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11553 struct vl_arb_cache *vlc;
11556 case FM_TBL_VL_HIGH_ARB:
11559 * OPA specifies 128 elements (of 2 bytes each), though
11560 * HFI supports only 16 elements in h/w.
11562 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11563 vl_arb_get_cache(vlc, t);
11564 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11566 case FM_TBL_VL_LOW_ARB:
11569 * OPA specifies 128 elements (of 2 bytes each), though
11570 * HFI supports only 16 elements in h/w.
11572 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11573 vl_arb_get_cache(vlc, t);
11574 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11576 case FM_TBL_BUFFER_CONTROL:
11577 size = get_buffer_control(ppd->dd, t, NULL);
11579 case FM_TBL_SC2VLNT:
11580 size = get_sc2vlnt(ppd->dd, t);
11582 case FM_TBL_VL_PREEMPT_ELEMS:
11584 /* OPA specifies 128 elements, of 2 bytes each */
11585 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11587 case FM_TBL_VL_PREEMPT_MATRIX:
11590 * OPA specifies that this is the same size as the VL
11591 * arbitration tables (i.e., 256 bytes).
11601 * Write the given fabric manager table.
11603 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11606 struct vl_arb_cache *vlc;
11609 case FM_TBL_VL_HIGH_ARB:
11610 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11611 if (vl_arb_match_cache(vlc, t)) {
11612 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11615 vl_arb_set_cache(vlc, t);
11616 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11617 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11618 VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11620 case FM_TBL_VL_LOW_ARB:
11621 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11622 if (vl_arb_match_cache(vlc, t)) {
11623 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11626 vl_arb_set_cache(vlc, t);
11627 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11628 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11629 VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11631 case FM_TBL_BUFFER_CONTROL:
11632 ret = set_buffer_control(ppd, t);
11634 case FM_TBL_SC2VLNT:
11635 set_sc2vlnt(ppd->dd, t);
11644 * Disable all data VLs.
11646 * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11648 static int disable_data_vls(struct hfi1_devdata *dd)
11653 pio_send_control(dd, PSC_DATA_VL_DISABLE);
11659 * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11660 * Just re-enables all data VLs (the "fill" part happens
11661 * automatically - the name was chosen for symmetry with
11662 * stop_drain_data_vls()).
11664 * Return 0 if successful, non-zero if the VLs cannot be enabled.
11666 int open_fill_data_vls(struct hfi1_devdata *dd)
11671 pio_send_control(dd, PSC_DATA_VL_ENABLE);
11677 * drain_data_vls() - assumes that disable_data_vls() has been called;
11678 * waits for the occupancy of the per-VL FIFOs (for all contexts) and the
11679 * SDMA engines to drop to 0.
11681 static void drain_data_vls(struct hfi1_devdata *dd)
11685 pause_for_credit_return(dd);
11689 * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11691 * Use open_fill_data_vls() to resume using data VLs. This pair is
11692 * meant to be used like this:
11694 * stop_drain_data_vls(dd);
11695 * // do things with per-VL resources
11696 * open_fill_data_vls(dd);
11698 int stop_drain_data_vls(struct hfi1_devdata *dd)
11702 ret = disable_data_vls(dd);
11704 drain_data_vls(dd);
11710 * Convert a nanosecond time to a cclock count. No matter how slow
11711 * the cclock, a non-zero ns will always have a non-zero result.
11713 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11717 if (dd->icode == ICODE_FPGA_EMULATION)
11718 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11719 else /* simulation pretends to be ASIC */
11720 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11721 if (ns && !cclocks) /* if ns nonzero, must be at least 1 */
11727 * Convert a cclock count to nanoseconds. No matter how slow
11728 * the cclock, a non-zero cclocks will always have a non-zero result.
11730 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11734 if (dd->icode == ICODE_FPGA_EMULATION)
11735 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11736 else /* simulation pretends to be ASIC */
11737 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11738 if (cclocks && !ns)
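/*
 * Worked example of the conversion pair above. The cclock period comes
 * from ASIC_CCLOCK_PS; the 1242ps figure below is an assumed value for
 * illustration, not the real constant:
 *
 *   ns_to_cclock(dd, 824) -> (824 * 1000) / 1242 = 663 cclocks
 *   cclock_to_ns(dd, 663) -> (663 * 1242) / 1000 = 823 ns
 *
 * The integer division truncates (824 ns in, 823 ns back), which is why
 * both helpers clamp a non-zero input to a result of at least 1.
 */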
11744 * Dynamically adjust the receive interrupt timeout for a context based on
11745 * incoming packet rate.
11747 * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11749 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11751 struct hfi1_devdata *dd = rcd->dd;
11752 u32 timeout = rcd->rcvavail_timeout;
11755 * This algorithm doubles or halves the timeout depending on whether
11756 * the number of packets received in this interrupt was less than or
11757 * greater than or equal to the interrupt count.
11759 * The calculations below do not allow a steady state to be achieved.
11760 * Only at the endpoints is it possible to have an unchanging timeout.
11763 if (npkts < rcv_intr_count) {
11765 * Not enough packets arrived before the timeout, adjust
11766 * timeout downward.
11768 if (timeout < 2) /* already at minimum? */
11773 * More than enough packets arrived before the timeout, adjust the timeout upward.
11776 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11778 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11781 rcd->rcvavail_timeout = timeout;
11783 * timeout cannot be larger than rcv_intr_timeout_csr, which has
11784 * already been verified to be in range.
11786 write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11788 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
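/*
 * Example of the doubling/halving dynamics above, with assumed values
 * rcv_intr_count = 16 and dd->rcv_intr_timeout_csr = 840:
 *
 *   timeout = 840, npkts =  4 -> halved to 420 (too few packets)
 *   timeout = 420, npkts = 64 -> doubled to 840 (more than enough)
 *   timeout = 840, npkts = 64 -> stays at 840 (already at the max)
 *
 * Everywhere between the endpoints (1 and rcv_intr_timeout_csr) the
 * value keeps moving, matching the "no steady state" note above.
 */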
11791 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11792 u32 intr_adjust, u32 npkts)
11794 struct hfi1_devdata *dd = rcd->dd;
11796 u32 ctxt = rcd->ctxt;
11799 * Need to write timeout register before updating RcvHdrHead to ensure
11800 * that a new value is used when the HW decides to restart counting.
11803 adjust_rcv_timeout(rcd, npkts);
11805 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11806 << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11807 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11809 reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11810 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11811 << RCV_HDR_HEAD_HEAD_SHIFT);
11812 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
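/*
 * Sketch of the RcvHdrHead packing done above. The shift/mask names are
 * the real macros; the head value is invented for illustration:
 *
 *   hd = 0x40, rcv_intr_count = 16
 *   reg = ((u64)16 << RCV_HDR_HEAD_COUNTER_SHIFT) |
 *         (((u64)0x40 & RCV_HDR_HEAD_HEAD_MASK) << RCV_HDR_HEAD_HEAD_SHIFT);
 *
 * A single CSR write advances the software head and re-arms the
 * interrupt packet counter, so the hardware never sees the two fields
 * out of sync.
 */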
11815 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11819 head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11820 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11822 if (rcd->rcvhdrtail_kvaddr)
11823 tail = get_rcvhdrtail(rcd);
11825 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11827 return head == tail;
11831 * Context Control and Receive Array encoding for buffer size:
11840 * 0x8 512 KB (Receive Array only)
11841 * 0x9 1 MB (Receive Array only)
11842 * 0xa 2 MB (Receive Array only)
11844 * 0xb-0xf - reserved (Receive Array only)
11847 * This routine assumes that the value has already been sanity checked.
11849 static u32 encoded_size(u32 size)
11852 case 4 * 1024: return 0x1;
11853 case 8 * 1024: return 0x2;
11854 case 16 * 1024: return 0x3;
11855 case 32 * 1024: return 0x4;
11856 case 64 * 1024: return 0x5;
11857 case 128 * 1024: return 0x6;
11858 case 256 * 1024: return 0x7;
11859 case 512 * 1024: return 0x8;
11860 case 1 * 1024 * 1024: return 0x9;
11861 case 2 * 1024 * 1024: return 0xa;
11863 return 0x1; /* if invalid, go with the minimum size */
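/*
 * The table above is a power-of-two encoding: for a valid size,
 * encoded_size(size) == ilog2(size) - 11 (4 KB = 2^12 -> 0x1,
 * 2 MB = 2^21 -> 0xa). The switch is used anyway so an invalid size
 * falls back to the safe minimum instead of yielding a bogus encoding.
 */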
11866 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11867 struct hfi1_ctxtdata *rcd)
11870 int did_enable = 0;
11878 hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11880 rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11881 /* if the context is already enabled, don't do the extra steps */
11882 if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11883 !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11884 /* reset the tail and hdr addresses, and sequence count */
11885 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11887 if (rcd->rcvhdrtail_kvaddr)
11888 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11889 rcd->rcvhdrqtailaddr_dma);
11892 /* reset the cached receive header queue head value */
11896 * Zero the receive header queue so we don't get false
11897 * positives when checking the sequence number. The
11898 * sequence numbers could land exactly on the same spot.
11899 * E.g., an rcd restart before the receive header wrapped.
11901 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
11903 /* starting timeout */
11904 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11906 /* enable the context */
11907 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11909 /* clean the egr buffer size first */
11910 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11911 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11912 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11913 << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11915 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11916 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11919 /* zero RcvEgrIndexHead */
11920 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11922 /* set eager count and base index */
11923 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11924 & RCV_EGR_CTRL_EGR_CNT_MASK)
11925 << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11926 (((rcd->eager_base >> RCV_SHIFT)
11927 & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11928 << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11929 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11932 * Set TID (expected) count and base index.
11933 * rcd->expected_count is set to individual RcvArray entries,
11934 * not pairs, and the CSR takes a pair-count in groups of
11935 * four, so divide by 8.
11937 reg = (((rcd->expected_count >> RCV_SHIFT)
11938 & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11939 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11940 (((rcd->expected_base >> RCV_SHIFT)
11941 & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11942 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11943 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
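/*
 * Worked example of the divide-by-8 above, assuming RCV_SHIFT == 3 and
 * an invented count: expected_count = 2048 individual RcvArray entries
 * = 1024 pairs = 256 groups of four pairs, so 2048 >> 3 = 256 is
 * written to the pair-count field.
 */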
11944 if (ctxt == HFI1_CTRL_CTXT)
11945 write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11947 if (op & HFI1_RCVCTRL_CTXT_DIS) {
11948 write_csr(dd, RCV_VL15, 0);
11950 * When a receive context is being disabled, turn on the tail
11951 * update with a dummy tail address and then disable it.
11954 if (dd->rcvhdrtail_dummy_dma) {
11955 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11956 dd->rcvhdrtail_dummy_dma);
11957 /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11958 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11961 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11963 if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
11964 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11965 IS_RCVAVAIL_START + rcd->ctxt, true);
11966 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11968 if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
11969 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11970 IS_RCVAVAIL_START + rcd->ctxt, false);
11971 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11973 if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11974 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11975 if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11976 /* See comment on RcvCtxtCtrl.TailUpd above */
11977 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11978 rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11980 if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11981 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11982 if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11983 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11984 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11986 * In one-packet-per-eager mode, the size comes from
11987 * the RcvArray entry.
11989 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11990 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11992 if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11993 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11994 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11995 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11996 if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11997 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11998 if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11999 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12000 if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
12001 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12002 if (op & HFI1_RCVCTRL_URGENT_ENB)
12003 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12004 IS_RCVURGENT_START + rcd->ctxt, true);
12005 if (op & HFI1_RCVCTRL_URGENT_DIS)
12006 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12007 IS_RCVURGENT_START + rcd->ctxt, false);
12009 hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12010 write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12012 /* work around sticky RcvCtxtStatus.BlockedRHQFull */
12014 (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12015 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12017 dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12019 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12020 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12021 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12022 read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12023 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12024 dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12025 ctxt, reg, reg == 0 ? "not" : "still");
12031 * The interrupt timeout and count must be set after
12032 * the context is enabled to take effect.
12034 /* set interrupt timeout */
12035 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12036 (u64)rcd->rcvavail_timeout <<
12037 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12039 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12040 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12041 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12044 if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12046 * If the context has been disabled and the Tail Update has
12047 * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy address
12048 * so it doesn't contain an invalid address.
12050 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12051 dd->rcvhdrtail_dummy_dma);
12054 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12060 ret = dd->cntrnameslen;
12061 *namep = dd->cntrnames;
12063 const struct cntr_entry *entry;
12066 ret = (dd->ndevcntrs) * sizeof(u64);
12068 /* Get the start of the block of counters */
12069 *cntrp = dd->cntrs;
12072 * Now go and fill in each counter in the block.
12074 for (i = 0; i < DEV_CNTR_LAST; i++) {
12075 entry = &dev_cntrs[i];
12076 hfi1_cdbg(CNTR, "reading %s", entry->name);
12077 if (entry->flags & CNTR_DISABLED) {
12079 hfi1_cdbg(CNTR, "\tDisabled\n");
12081 if (entry->flags & CNTR_VL) {
12082 hfi1_cdbg(CNTR, "\tPer VL\n");
12083 for (j = 0; j < C_VL_COUNT; j++) {
12084 val = entry->rw_cntr(entry,
12090 "\t\tRead 0x%llx for %d\n",
12092 dd->cntrs[entry->offset + j] =
12095 } else if (entry->flags & CNTR_SDMA) {
12097 "\t Per SDMA Engine\n");
12098 for (j = 0; j < chip_sdma_engines(dd);
12101 entry->rw_cntr(entry, dd, j,
12104 "\t\tRead 0x%llx for %d\n",
12106 dd->cntrs[entry->offset + j] =
12110 val = entry->rw_cntr(entry, dd,
12113 dd->cntrs[entry->offset] = val;
12114 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12123 * Used by sysfs to create files for hfi stats to read
12125 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12131 ret = ppd->dd->portcntrnameslen;
12132 *namep = ppd->dd->portcntrnames;
12134 const struct cntr_entry *entry;
12137 ret = ppd->dd->nportcntrs * sizeof(u64);
12138 *cntrp = ppd->cntrs;
12140 for (i = 0; i < PORT_CNTR_LAST; i++) {
12141 entry = &port_cntrs[i];
12142 hfi1_cdbg(CNTR, "reading %s", entry->name);
12143 if (entry->flags & CNTR_DISABLED) {
12145 hfi1_cdbg(CNTR, "\tDisabled\n");
12149 if (entry->flags & CNTR_VL) {
12150 hfi1_cdbg(CNTR, "\tPer VL");
12151 for (j = 0; j < C_VL_COUNT; j++) {
12152 val = entry->rw_cntr(entry, ppd, j,
12157 "\t\tRead 0x%llx for %d",
12159 ppd->cntrs[entry->offset + j] = val;
12162 val = entry->rw_cntr(entry, ppd,
12166 ppd->cntrs[entry->offset] = val;
12167 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12174 static void free_cntrs(struct hfi1_devdata *dd)
12176 struct hfi1_pportdata *ppd;
12179 if (dd->synth_stats_timer.function)
12180 del_timer_sync(&dd->synth_stats_timer);
12181 ppd = (struct hfi1_pportdata *)(dd + 1);
12182 for (i = 0; i < dd->num_pports; i++, ppd++) {
12184 kfree(ppd->scntrs);
12185 free_percpu(ppd->ibport_data.rvp.rc_acks);
12186 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12187 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12189 ppd->scntrs = NULL;
12190 ppd->ibport_data.rvp.rc_acks = NULL;
12191 ppd->ibport_data.rvp.rc_qacks = NULL;
12192 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12194 kfree(dd->portcntrnames);
12195 dd->portcntrnames = NULL;
12200 kfree(dd->cntrnames);
12201 dd->cntrnames = NULL;
12202 if (dd->update_cntr_wq) {
12203 destroy_workqueue(dd->update_cntr_wq);
12204 dd->update_cntr_wq = NULL;
12208 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12209 u64 *psval, void *context, int vl)
12214 if (entry->flags & CNTR_DISABLED) {
12215 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12219 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12221 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12223 /* If it's a synthetic counter, there is more work we need to do */
12224 if (entry->flags & CNTR_SYNTH) {
12225 if (sval == CNTR_MAX) {
12226 /* No need to read; already saturated */
12230 if (entry->flags & CNTR_32BIT) {
12231 /* 32bit counters can wrap multiple times */
12232 u64 upper = sval >> 32;
12233 u64 lower = (sval << 32) >> 32;
12235 if (lower > val) { /* hw wrapped */
12236 if (upper == CNTR_32BIT_MAX)
12242 if (val != CNTR_MAX)
12243 val = (upper << 32) | val;
12246 /* If we rolled we are saturated */
12247 if ((val < sval) || (val > CNTR_MAX))
12254 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
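/*
 * Example of the 32-bit wrap handling above (values invented):
 *
 *   saved sval = 0x1FFFFFF00 -> upper = 0x1, lower = 0xFFFFFF00
 *   hw read val = 0x10       -> lower > val, so the hw wrapped
 *   upper becomes 0x2        -> val = (0x2 << 32) | 0x10 = 0x200000010
 *
 * Only when upper has already reached CNTR_32BIT_MAX does the counter
 * saturate at CNTR_MAX instead of advancing.
 */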
12259 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12260 struct cntr_entry *entry,
12261 u64 *psval, void *context, int vl, u64 data)
12265 if (entry->flags & CNTR_DISABLED) {
12266 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12270 hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12272 if (entry->flags & CNTR_SYNTH) {
12274 if (entry->flags & CNTR_32BIT) {
12275 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12276 (data << 32) >> 32);
12277 val = data; /* return the full 64bit value */
12279 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12283 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12288 hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12293 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12295 struct cntr_entry *entry;
12298 entry = &dev_cntrs[index];
12299 sval = dd->scntrs + entry->offset;
12301 if (vl != CNTR_INVALID_VL)
12304 return read_dev_port_cntr(dd, entry, sval, dd, vl);
12307 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12309 struct cntr_entry *entry;
12312 entry = &dev_cntrs[index];
12313 sval = dd->scntrs + entry->offset;
12315 if (vl != CNTR_INVALID_VL)
12318 return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12321 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12323 struct cntr_entry *entry;
12326 entry = &port_cntrs[index];
12327 sval = ppd->scntrs + entry->offset;
12329 if (vl != CNTR_INVALID_VL)
12332 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12333 (index <= C_RCV_HDR_OVF_LAST)) {
12334 /* We do not want to bother for disabled contexts */
12338 return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12341 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12343 struct cntr_entry *entry;
12346 entry = &port_cntrs[index];
12347 sval = ppd->scntrs + entry->offset;
12349 if (vl != CNTR_INVALID_VL)
12352 if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12353 (index <= C_RCV_HDR_OVF_LAST)) {
12354 /* We do not want to bother for disabled contexts */
12358 return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12361 static void do_update_synth_timer(struct work_struct *work)
12368 struct hfi1_pportdata *ppd;
12369 struct cntr_entry *entry;
12370 struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12374 * Rather than keep beating on the CSRs, pick a minimal set that we can
12375 * check to watch for a potential rollover. We can do this by looking at
12376 * the number of flits sent/received. If the total flit count exceeds
12377 * 32 bits then we have to iterate over all the counters and update them.
12379 entry = &dev_cntrs[C_DC_RCV_FLITS];
12380 cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12382 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12383 cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12387 "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12388 dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
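/*
 * Two conditions force a full update (numbers invented):
 *   - rollover: cur_tx = 0x0f00 while last_tx = 0xf000 (cur < last)
 *   - volume: (cur_tx - last_tx) + (cur_rx - last_rx) >= 0xffffffff,
 *     i.e. enough flits moved that a 32-bit counter could have wrapped
 *     unnoticed since the last pass.
 */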
12390 if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12392 * May not be strictly necessary to update but it won't hurt and
12393 * simplifies the logic here.
12396 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12399 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12401 "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12402 total_flits, (u64)CNTR_32BIT_MAX);
12403 if (total_flits >= CNTR_32BIT_MAX) {
12404 hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12411 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12412 for (i = 0; i < DEV_CNTR_LAST; i++) {
12413 entry = &dev_cntrs[i];
12414 if (entry->flags & CNTR_VL) {
12415 for (vl = 0; vl < C_VL_COUNT; vl++)
12416 read_dev_cntr(dd, i, vl);
12418 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12421 ppd = (struct hfi1_pportdata *)(dd + 1);
12422 for (i = 0; i < dd->num_pports; i++, ppd++) {
12423 for (j = 0; j < PORT_CNTR_LAST; j++) {
12424 entry = &port_cntrs[j];
12425 if (entry->flags & CNTR_VL) {
12426 for (vl = 0; vl < C_VL_COUNT; vl++)
12427 read_port_cntr(ppd, j, vl);
12429 read_port_cntr(ppd, j, CNTR_INVALID_VL);
12435 * We want the value in the register. The goal is to keep track
12436 * of the number of "ticks", not the counter value. In other
12437 * words, if the register rolls, we want to notice it and go ahead
12438 * and force an update.
12440 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12441 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12444 entry = &dev_cntrs[C_DC_RCV_FLITS];
12445 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12448 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12449 dd->unit, dd->last_tx, dd->last_rx);
12452 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12456 static void update_synth_timer(struct timer_list *t)
12458 struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12460 queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12461 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12464 #define C_MAX_NAME 16 /* 15 chars + one for '\0' */
12465 static int init_cntrs(struct hfi1_devdata *dd)
12467 int i, rcv_ctxts, j;
12470 char name[C_MAX_NAME];
12471 struct hfi1_pportdata *ppd;
12472 const char *bit_type_32 = ",32";
12473 const int bit_type_32_sz = strlen(bit_type_32);
12474 u32 sdma_engines = chip_sdma_engines(dd);
12476 /* set up the stats timer; the add_timer is done at the end */
12477 timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12479 /***********************/
12480 /* per device counters */
12481 /***********************/
12483 /* size names and determine how many we have */
12487 for (i = 0; i < DEV_CNTR_LAST; i++) {
12488 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12489 hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12493 if (dev_cntrs[i].flags & CNTR_VL) {
12494 dev_cntrs[i].offset = dd->ndevcntrs;
12495 for (j = 0; j < C_VL_COUNT; j++) {
12496 snprintf(name, C_MAX_NAME, "%s%d",
12497 dev_cntrs[i].name, vl_from_idx(j));
12498 sz += strlen(name);
12499 /* Add ",32" for 32-bit counters */
12500 if (dev_cntrs[i].flags & CNTR_32BIT)
12501 sz += bit_type_32_sz;
12505 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12506 dev_cntrs[i].offset = dd->ndevcntrs;
12507 for (j = 0; j < sdma_engines; j++) {
12508 snprintf(name, C_MAX_NAME, "%s%d",
12509 dev_cntrs[i].name, j);
12510 sz += strlen(name);
12511 /* Add ",32" for 32-bit counters */
12512 if (dev_cntrs[i].flags & CNTR_32BIT)
12513 sz += bit_type_32_sz;
12518 /* +1 for newline. */
12519 sz += strlen(dev_cntrs[i].name) + 1;
12520 /* Add ",32" for 32-bit counters */
12521 if (dev_cntrs[i].flags & CNTR_32BIT)
12522 sz += bit_type_32_sz;
12523 dev_cntrs[i].offset = dd->ndevcntrs;
12528 /* allocate space for the counter values */
12529 dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12534 dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12538 /* allocate space for the counter names */
12539 dd->cntrnameslen = sz;
12540 dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12541 if (!dd->cntrnames)
12544 /* fill in the names */
12545 for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12546 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12548 } else if (dev_cntrs[i].flags & CNTR_VL) {
12549 for (j = 0; j < C_VL_COUNT; j++) {
12550 snprintf(name, C_MAX_NAME, "%s%d",
12553 memcpy(p, name, strlen(name));
12556 /* Counter is 32 bits */
12557 if (dev_cntrs[i].flags & CNTR_32BIT) {
12558 memcpy(p, bit_type_32, bit_type_32_sz);
12559 p += bit_type_32_sz;
12564 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12565 for (j = 0; j < sdma_engines; j++) {
12566 snprintf(name, C_MAX_NAME, "%s%d",
12567 dev_cntrs[i].name, j);
12568 memcpy(p, name, strlen(name));
12571 /* Counter is 32 bits */
12572 if (dev_cntrs[i].flags & CNTR_32BIT) {
12573 memcpy(p, bit_type_32, bit_type_32_sz);
12574 p += bit_type_32_sz;
12580 memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12581 p += strlen(dev_cntrs[i].name);
12583 /* Counter is 32 bits */
12584 if (dev_cntrs[i].flags & CNTR_32BIT) {
12585 memcpy(p, bit_type_32, bit_type_32_sz);
12586 p += bit_type_32_sz;
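/*
 * The buffer built above is one newline-separated string with per-VL
 * and per-engine counters expanded and 32-bit counters tagged ",32".
 * A hypothetical layout (counter names invented for illustration):
 *
 *   "PortXmitData\nRcvDataVL0,32\nRcvDataVL1,32\n...\nSendDmaDesc0\n"
 *
 * Userspace splits on '\n' and pairs each name with the u64 at the
 * same index in the values block returned by hfi1_read_cntrs().
 */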
12593 /*********************/
12594 /* per port counters */
12595 /*********************/
12598 * Go through the counters for the overflows and disable the ones we
12599 * don't need. This varies based on platform so we need to do it
12600 * dynamically here.
12602 rcv_ctxts = dd->num_rcv_contexts;
12603 for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12604 i <= C_RCV_HDR_OVF_LAST; i++) {
12605 port_cntrs[i].flags |= CNTR_DISABLED;
12608 /* size port counter names and determine how many we have */
12610 dd->nportcntrs = 0;
12611 for (i = 0; i < PORT_CNTR_LAST; i++) {
12612 if (port_cntrs[i].flags & CNTR_DISABLED) {
12613 hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12617 if (port_cntrs[i].flags & CNTR_VL) {
12618 port_cntrs[i].offset = dd->nportcntrs;
12619 for (j = 0; j < C_VL_COUNT; j++) {
12620 snprintf(name, C_MAX_NAME, "%s%d",
12621 port_cntrs[i].name, vl_from_idx(j));
12622 sz += strlen(name);
12623 /* Add ",32" for 32-bit counters */
12624 if (port_cntrs[i].flags & CNTR_32BIT)
12625 sz += bit_type_32_sz;
12630 /* +1 for newline */
12631 sz += strlen(port_cntrs[i].name) + 1;
12632 /* Add ",32" for 32-bit counters */
12633 if (port_cntrs[i].flags & CNTR_32BIT)
12634 sz += bit_type_32_sz;
12635 port_cntrs[i].offset = dd->nportcntrs;
12640 /* allocate space for the counter names */
12641 dd->portcntrnameslen = sz;
12642 dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12643 if (!dd->portcntrnames)
12646 /* fill in port cntr names */
12647 for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12648 if (port_cntrs[i].flags & CNTR_DISABLED)
12651 if (port_cntrs[i].flags & CNTR_VL) {
12652 for (j = 0; j < C_VL_COUNT; j++) {
12653 snprintf(name, C_MAX_NAME, "%s%d",
12654 port_cntrs[i].name, vl_from_idx(j));
12655 memcpy(p, name, strlen(name));
12658 /* Counter is 32 bits */
12659 if (port_cntrs[i].flags & CNTR_32BIT) {
12660 memcpy(p, bit_type_32, bit_type_32_sz);
12661 p += bit_type_32_sz;
12667 memcpy(p, port_cntrs[i].name,
12668 strlen(port_cntrs[i].name));
12669 p += strlen(port_cntrs[i].name);
12671 /* Counter is 32 bits */
12672 if (port_cntrs[i].flags & CNTR_32BIT) {
12673 memcpy(p, bit_type_32, bit_type_32_sz);
12674 p += bit_type_32_sz;
12681 /* allocate per port storage for counter values */
12682 ppd = (struct hfi1_pportdata *)(dd + 1);
12683 for (i = 0; i < dd->num_pports; i++, ppd++) {
12684 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12688 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12693 /* CPU counters need to be allocated and zeroed */
12694 if (init_cpu_counters(dd))
12697 dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12698 WQ_MEM_RECLAIM, dd->unit);
12699 if (!dd->update_cntr_wq)
12702 INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12704 mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12711 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12713 switch (chip_lstate) {
12716 "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12720 return IB_PORT_DOWN;
12722 return IB_PORT_INIT;
12724 return IB_PORT_ARMED;
12725 case LSTATE_ACTIVE:
12726 return IB_PORT_ACTIVE;
12730 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12732 /* look at the HFI meta-states only */
12733 switch (chip_pstate & 0xf0) {
12735 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12739 return IB_PORTPHYSSTATE_DISABLED;
12741 return OPA_PORTPHYSSTATE_OFFLINE;
12743 return IB_PORTPHYSSTATE_POLLING;
12744 case PLS_CONFIGPHY:
12745 return IB_PORTPHYSSTATE_TRAINING;
12747 return IB_PORTPHYSSTATE_LINKUP;
12749 return IB_PORTPHYSSTATE_PHY_TEST;
12753 /* return the OPA port logical state name */
12754 const char *opa_lstate_name(u32 lstate)
12756 static const char * const port_logical_names[] = {
12762 "PORT_ACTIVE_DEFER",
12764 if (lstate < ARRAY_SIZE(port_logical_names))
12765 return port_logical_names[lstate];
12769 /* return the OPA port physical state name */
12770 const char *opa_pstate_name(u32 pstate)
12772 static const char * const port_physical_names[] = {
12779 "PHYS_LINK_ERR_RECOVER",
12786 if (pstate < ARRAY_SIZE(port_physical_names))
12787 return port_physical_names[pstate];
12792 * update_statusp - Update userspace status flag
12793 * @ppd: Port data structure
12794 * @state: port state information
12796 * Actual port status is determined by the host_link_state value
12799 * host_link_state MUST be updated before updating the user space status flag.
12802 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12805 * Set port status flags in the page mapped into userspace
12806 * memory. Do it here to ensure a reliable state - this is
12807 * the only function called by all state handling code.
12808 * Always set the flags because the cached value
12809 * might have been changed explicitly outside of this function.
12812 if (ppd->statusp) {
12816 *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12817 HFI1_STATUS_IB_READY);
12819 case IB_PORT_ARMED:
12820 *ppd->statusp |= HFI1_STATUS_IB_CONF;
12822 case IB_PORT_ACTIVE:
12823 *ppd->statusp |= HFI1_STATUS_IB_READY;
12827 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12828 opa_lstate_name(state), state);
12832 * wait_logical_linkstate - wait for an IB link state change to occur
12833 * @ppd: port device
12834 * @state: the state to wait for
12835 * @msecs: the number of milliseconds to wait
12837 * Wait up to msecs milliseconds for IB link state change to occur.
12838 * For now, take the easy polling route.
12839 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12841 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12844 unsigned long timeout;
12847 timeout = jiffies + msecs_to_jiffies(msecs);
12849 new_state = chip_to_opa_lstate(ppd->dd,
12850 read_logical_state(ppd->dd));
12851 if (new_state == state)
12853 if (time_after(jiffies, timeout)) {
12854 dd_dev_err(ppd->dd,
12855 "timeout waiting for link state 0x%x\n",
12865 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12867 u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12869 dd_dev_info(ppd->dd,
12870 "physical state changed to %s (0x%x), phy 0x%x\n",
12871 opa_pstate_name(ib_pstate), ib_pstate, state);
12875 * Read the physical hardware link state and check whether it matches
12876 * the host driver's anticipated state.
12878 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12880 u32 read_state = read_physical_state(ppd->dd);
12882 if (read_state == state) {
12883 log_state_transition(ppd, state);
12885 dd_dev_err(ppd->dd,
12886 "anticipated phy link state 0x%x, read 0x%x\n",
12887 state, read_state);
12892 * wait_physical_linkstate - wait for a physical link state change to occur
12893 * @ppd: port device
12894 * @state: the state to wait for
12895 * @msecs: the number of milliseconds to wait
12897 * Wait up to msecs milliseconds for physical link state change to occur.
12898 * Returns 0 if state reached, otherwise -ETIMEDOUT.
12900 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12904 unsigned long timeout;
12906 timeout = jiffies + msecs_to_jiffies(msecs);
12908 read_state = read_physical_state(ppd->dd);
12909 if (read_state == state)
12911 if (time_after(jiffies, timeout)) {
12912 dd_dev_err(ppd->dd,
12913 "timeout waiting for phy link state 0x%x\n",
12917 usleep_range(1950, 2050); /* sleep 2ms-ish */
12920 log_state_transition(ppd, state);
12925 * wait_phys_link_offline_substates - wait for any offline substate
12926 * @ppd: port device
12927 * @msecs: the number of milliseconds to wait
12929 * Wait up to msecs milliseconds for any offline physical link
12930 * state change to occur.
12931 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
12933 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12937 unsigned long timeout;
12939 timeout = jiffies + msecs_to_jiffies(msecs);
12941 read_state = read_physical_state(ppd->dd);
12942 if ((read_state & 0xF0) == PLS_OFFLINE)
12944 if (time_after(jiffies, timeout)) {
12945 dd_dev_err(ppd->dd,
12946 "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12947 read_state, msecs);
12950 usleep_range(1950, 2050); /* sleep 2ms-ish */
12953 log_state_transition(ppd, read_state);
12958 * wait_phys_link_out_of_offline - wait for any out of offline state
12959 * @ppd: port device
12960 * @msecs: the number of milliseconds to wait
12962 * Wait up to msecs milliseconds for any out of offline physical link
12963 * state change to occur.
12964 * Returns 0 if at least one state is reached, otherwise -ETIMEDOUT.
12966 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
12970 unsigned long timeout;
12972 timeout = jiffies + msecs_to_jiffies(msecs);
12974 read_state = read_physical_state(ppd->dd);
12975 if ((read_state & 0xF0) != PLS_OFFLINE)
12977 if (time_after(jiffies, timeout)) {
12978 dd_dev_err(ppd->dd,
12979 "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
12980 read_state, msecs);
12983 usleep_range(1950, 2050); /* sleep 2ms-ish */
12986 log_state_transition(ppd, read_state);
12990 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12991 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12993 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12994 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12996 void hfi1_init_ctxt(struct send_context *sc)
12999 struct hfi1_devdata *dd = sc->dd;
13001 u8 set = (sc->type == SC_USER ?
13002 HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13003 HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
13004 reg = read_kctxt_csr(dd, sc->hw_context,
13005 SEND_CTXT_CHECK_ENABLE);
13007 CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13009 SET_STATIC_RATE_CONTROL_SMASK(reg);
13010 write_kctxt_csr(dd, sc->hw_context,
13011 SEND_CTXT_CHECK_ENABLE, reg);
13015 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13020 if (dd->icode != ICODE_RTL_SILICON) {
13021 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13022 dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13026 reg = read_csr(dd, ASIC_STS_THERM);
13027 temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13028 ASIC_STS_THERM_CURR_TEMP_MASK);
13029 temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13030 ASIC_STS_THERM_LO_TEMP_MASK);
13031 temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13032 ASIC_STS_THERM_HI_TEMP_MASK);
13033 temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13034 ASIC_STS_THERM_CRIT_TEMP_MASK);
13035 /* triggers is a 3-bit value - 1 bit per trigger. */
13036 temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13041 /* ========================================================================= */
13044 * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13045 * @dd: valid devdata
13046 * @src: IRQ source to determine register index from
13047 * @bits: the bits to set or clear
13048 * @set: true == set the bits, false == clear the bits
13051 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
13055 u16 idx = src / BITS_PER_REGISTER;
13057 spin_lock(&dd->irq_src_lock);
13058 reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
13063 write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
13064 spin_unlock(&dd->irq_src_lock);
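/*
 * Index math sketch for read_mod_write(), assuming BITS_PER_REGISTER
 * is 64 (one 64-bit CCE_INT_MASK CSR per 64 sources):
 *
 *   src = 75 -> idx = 75 / 64 = 1, so the CSR is CCE_INT_MASK + 8 * 1
 *               and the source is bit 75 % 64 = 11 in that register.
 */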
13068 * set_intr_bits() - Enable/disable a range (one or more) IRQ sources
13069 * @dd: valid devdata
13070 * @first: first IRQ source to set/clear
13071 * @last: last IRQ source (inclusive) to set/clear
13072 * @set: true == set the bits, false == clear the bits
13074 * If first == last, set the exact source.
13076 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
13082 if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
13088 for (src = first; src <= last; src++) {
13089 bit = src % BITS_PER_REGISTER;
13090 /* wrapped to next register? */
13091 if (!bit && bits) {
13092 read_mod_write(dd, src - 1, bits, set);
13095 bits |= BIT_ULL(bit);
13097 read_mod_write(dd, last, bits, set);
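/*
 * Walkthrough of a range that crosses a register boundary, again
 * assuming 64 sources per register: first = 62, last = 65.
 *
 *   src 62, 63 -> bits = BIT_ULL(62) | BIT_ULL(63)
 *   src 64     -> bit wraps to 0, so the accumulated bits are flushed
 *                 with read_mod_write(dd, 63, bits, set), then
 *                 bits restarts at BIT_ULL(0)
 *   src 65     -> bits |= BIT_ULL(1)
 *   loop ends  -> read_mod_write(dd, 65, BIT_ULL(0) | BIT_ULL(1), set)
 */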
13103 * Clear all interrupt sources on the chip.
13105 void clear_all_interrupts(struct hfi1_devdata *dd)
13109 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13110 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13112 write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13113 write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13114 write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13115 write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13116 write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13117 write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13118 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13119 for (i = 0; i < chip_send_contexts(dd); i++)
13120 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13121 for (i = 0; i < chip_sdma_engines(dd); i++)
13122 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13124 write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13125 write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13126 write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13130 * Remap the interrupt source from the general handler to the given MSI-X interrupt.
13133 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13138 /* clear from the handled mask of the general interrupt */
13141 if (likely(m < CCE_NUM_INT_CSRS)) {
13142 dd->gi_mask[m] &= ~((u64)1 << n);
13144 dd_dev_err(dd, "remap interrupt err\n");
13148 /* direct the chip source to the given MSI-X interrupt */
13151 reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13152 reg &= ~((u64)0xff << (8 * n));
13153 reg |= ((u64)msix_intr & 0xff) << (8 * n);
13154 write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13157 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
13160 * SDMA engine interrupt sources are grouped by type rather than by
13161 * engine. Per-engine interrupts are as follows:
13166 remap_intr(dd, IS_SDMA_START + engine, msix_intr);
13167 remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
13168 remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
13172 * Set the general handler to accept all interrupts, remap all
13173 * chip interrupts back to MSI-X 0.
13175 void reset_interrupts(struct hfi1_devdata *dd)
13179 /* all interrupts handled by the general handler */
13180 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13181 dd->gi_mask[i] = ~(u64)0;
13183 /* all chip interrupts map to MSI-X 0 */
13184 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13185 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13189 * set_up_interrupts() - Initialize the IRQ resources and state
13190 * @dd: valid devdata
13193 static int set_up_interrupts(struct hfi1_devdata *dd)
13197 /* mask all interrupts */
13198 set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
13200 /* clear all pending interrupts */
13201 clear_all_interrupts(dd);
13203 /* reset general handler mask, chip MSI-X mappings */
13204 reset_interrupts(dd);
13206 /* ask for MSI-X interrupts */
13207 ret = msix_initialize(dd);
13211 ret = msix_request_irqs(dd);
13213 msix_clean_up_interrupts(dd);
13219 * Set up context values in dd. Sets:
13221 * num_rcv_contexts - number of contexts being used
13222 * n_krcv_queues - number of kernel contexts
13223 * first_dyn_alloc_ctxt - first dynamically allocated context
13224 * in array of contexts
13225 * freectxts - number of free user contexts
13226 * num_send_contexts - number of PIO send contexts being used
13227 * num_vnic_contexts - number of contexts reserved for VNIC
13229 static int set_up_context_variables(struct hfi1_devdata *dd)
13231 unsigned long num_kernel_contexts;
13232 u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13233 int total_contexts;
13237 int user_rmt_reduced;
13239 u32 send_contexts = chip_send_contexts(dd);
13240 u32 rcv_contexts = chip_rcv_contexts(dd);
13243 * Kernel receive contexts:
13244 * - Context 0 - control context (VL15/multicast/error)
13245 * - Context 1 - first kernel context
13246 * - Context 2 - second kernel context
13251 * n_krcvqs is the sum of module parameter kernel receive
13252 * contexts, krcvqs[]. It does not include the control
13253 * context, so add that.
13255 num_kernel_contexts = n_krcvqs + 1;
13257 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13259 * Every kernel receive context needs an ACK send context.
13260 * One send context is allocated for each VL{0-7} and VL15.
13262 if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13264 "Reducing # kernel rcv contexts to: %d, from %lu\n",
13265 send_contexts - num_vls - 1,
13266 num_kernel_contexts);
13267 num_kernel_contexts = send_contexts - num_vls - 1;
13270 /* Accommodate VNIC contexts if possible */
13271 if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
13272 dd_dev_err(dd, "No receive contexts available for VNIC\n");
13273 num_vnic_contexts = 0;
13275 total_contexts = num_kernel_contexts + num_vnic_contexts;
13279 * - default to 1 user context per real (non-HT) CPU core if
13280 * num_user_contexts is negative
13282 if (num_user_contexts < 0)
13283 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13285 n_usr_ctxts = num_user_contexts;
13287 * Adjust the counts given a global max.
13289 if (total_contexts + n_usr_ctxts > rcv_contexts) {
13291 "Reducing # user receive contexts to: %d, from %u\n",
13292 rcv_contexts - total_contexts,
13295 n_usr_ctxts = rcv_contexts - total_contexts;
13299 * The RMT entries are currently allocated as shown below:
13300 * 1. QOS (0 to 128 entries);
13301 * 2. FECN (num_kernel_contexts - 1 + num_user_contexts +
13302 * num_vnic_contexts);
13303 * 3. VNIC (num_vnic_contexts).
13304 * It should be noted that FECN oversubscribes num_vnic_contexts
13305 * entries of the RMT because both VNIC and PSM could allocate any receive
13306 * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
13307 * and PSM FECN must reserve an RMT entry for each possible PSM receive context.
13310 rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
13311 if (HFI1_CAP_IS_KSET(TID_RDMA))
13312 rmt_count += num_kernel_contexts - 1;
13313 if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13314 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
13316 "RMT size is reducing the number of user receive contexts from %u to %d\n",
13320 n_usr_ctxts = user_rmt_reduced;
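/*
 * Sizing example (all numbers invented, including the assumption
 * NUM_MAP_ENTRIES == 256): with 32 QOS entries, num_vnic_contexts = 8
 * and num_kernel_contexts = 17 with TID_RDMA set,
 *   rmt_count = 32 + (8 * 2) + (17 - 1) = 64,
 * leaving at most 256 - 64 = 192 user receive contexts before the
 * reduction above kicks in.
 */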
13323 total_contexts += n_usr_ctxts;
13325 /* the first N are kernel contexts, the rest are user/vnic contexts */
13326 dd->num_rcv_contexts = total_contexts;
13327 dd->n_krcv_queues = num_kernel_contexts;
13328 dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13329 dd->num_vnic_contexts = num_vnic_contexts;
13330 dd->num_user_contexts = n_usr_ctxts;
13331 dd->freectxts = n_usr_ctxts;
13333 "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13335 (int)dd->num_rcv_contexts,
13336 (int)dd->n_krcv_queues,
13337 dd->num_vnic_contexts,
13338 dd->num_user_contexts);
13341 * Receive array allocation:
13342 * All RcvArray entries are divided into groups of 8. This
13343 * is required by the hardware and will speed up writes to
13344 * consecutive entries by using write-combining of the entire cacheline.
13347 * The number of groups is evenly divided among all contexts;
13348 * any leftover groups are given to the first N user contexts.
13351 dd->rcv_entries.group_size = RCV_INCREMENT;
13352 ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13353 dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13354 dd->rcv_entries.nctxt_extra = ngroups -
13355 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13356 dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13357 dd->rcv_entries.ngroups,
13358 dd->rcv_entries.nctxt_extra);
13359 if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13360 MAX_EAGER_ENTRIES * 2) {
13361 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13362 dd->rcv_entries.group_size;
13364 "RcvArray group count too high, change to %u\n",
13365 dd->rcv_entries.ngroups);
13366 dd->rcv_entries.nctxt_extra = 0;
13369 * PIO send contexts
13371 ret = init_sc_pools_and_sizes(dd);
13372 if (ret >= 0) { /* success */
13373 dd->num_send_contexts = ret;
13376 "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13378 dd->num_send_contexts,
13379 dd->sc_sizes[SC_KERNEL].count,
13380 dd->sc_sizes[SC_ACK].count,
13381 dd->sc_sizes[SC_USER].count,
13382 dd->sc_sizes[SC_VL15].count);
13383 ret = 0; /* success */
13390 * Set the device/port partition key table. The MAD code
13391 * will ensure that, at least, the partial management
13392 * partition key is present in the table.
13394 static void set_partition_keys(struct hfi1_pportdata *ppd)
13396 struct hfi1_devdata *dd = ppd->dd;
13400 dd_dev_info(dd, "Setting partition keys\n");
13401 for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13402 reg |= (ppd->pkeys[i] &
13403 RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13405 RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13406 /* Each register holds 4 PKey values. */
13407 if ((i % 4) == 3) {
13408 write_csr(dd, RCV_PARTITION_KEY +
13409 ((i - 3) * 2), reg);
13414 /* Always enable HW pkeys check when pkeys table is set */
13415 add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
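/*
 * Packing sketch for set_partition_keys() above: each 64-bit
 * RCV_PARTITION_KEY register holds four 16-bit pkeys, so pkeys[0..3]
 * land in the register at byte offset 0 and pkeys[4..7] in the one at
 * offset 8 ((i - 3) * 2 with i = 7).
 */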
13419 * These CSRs and memories are uninitialized on reset and must be
13420 * written before reading to set the ECC/parity bits.
13422 * NOTE: All user context CSRs that are not mmapped write-only
13423 * (e.g. the TID flows) must be initialized even if the driver never reads them.
13426 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13431 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13432 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13434 /* SendCtxtCreditReturnAddr */
13435 for (i = 0; i < chip_send_contexts(dd); i++)
13436 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13438 /* PIO Send buffers */
13439 /* SDMA Send buffers */
13441 * These are not normally read, and (presently) have no method
13442 * to be read, so are not pre-initialized
13446 /* RcvHdrTailAddr */
13447 /* RcvTidFlowTable */
13448 for (i = 0; i < chip_rcv_contexts(dd); i++) {
13449 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13450 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13451 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13452 write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13456 for (i = 0; i < chip_rcv_array_count(dd); i++)
13457 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13459 /* RcvQPMapTable */
13460 for (i = 0; i < 32; i++)
13461 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13465 * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13467 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13470 unsigned long timeout;
13473 /* is the condition present? */
13474 reg = read_csr(dd, CCE_STATUS);
13475 if ((reg & status_bits) == 0)
13478 /* clear the condition */
13479 write_csr(dd, CCE_CTRL, ctrl_bits);
13481 /* wait for the condition to clear */
13482 timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13484 reg = read_csr(dd, CCE_STATUS);
13485 if ((reg & status_bits) == 0)
13487 if (time_after(jiffies, timeout)) {
13489 "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13490 status_bits, reg & status_bits);
13497 /* set CCE CSRs to chip reset defaults */
13498 static void reset_cce_csrs(struct hfi1_devdata *dd)
13502 /* CCE_REVISION read-only */
13503 /* CCE_REVISION2 read-only */
13504 /* CCE_CTRL - bits clear automatically */
13505 /* CCE_STATUS read-only, use CceCtrl to clear */
13506 clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13507 clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13508 clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13509 for (i = 0; i < CCE_NUM_SCRATCH; i++)
13510 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13511 /* CCE_ERR_STATUS read-only */
13512 write_csr(dd, CCE_ERR_MASK, 0);
13513 write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13514 /* CCE_ERR_FORCE leave alone */
13515 for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13516 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13517 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13518 /* CCE_PCIE_CTRL leave alone */
13519 for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13520 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13521 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13522 CCE_MSIX_TABLE_UPPER_RESETCSR);
13524 for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13525 /* CCE_MSIX_PBA read-only */
13526 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13527 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13529 for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13530 write_csr(dd, CCE_INT_MAP, 0);
13531 for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13532 /* CCE_INT_STATUS read-only */
13533 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13534 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13535 /* CCE_INT_FORCE leave alone */
13536 /* CCE_INT_BLOCKED read-only */
13538 for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13539 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13542 /* set MISC CSRs to chip reset defaults */
13543 static void reset_misc_csrs(struct hfi1_devdata *dd)
13547 for (i = 0; i < 32; i++) {
13548 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13549 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13550 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13553 * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13554 * only be written in 128-byte chunks
13556 /* init RSA engine to clear lingering errors */
13557 write_csr(dd, MISC_CFG_RSA_CMD, 1);
13558 write_csr(dd, MISC_CFG_RSA_MU, 0);
13559 write_csr(dd, MISC_CFG_FW_CTRL, 0);
13560 /* MISC_STS_8051_DIGEST read-only */
13561 /* MISC_STS_SBM_DIGEST read-only */
13562 /* MISC_STS_PCIE_DIGEST read-only */
13563 /* MISC_STS_FAB_DIGEST read-only */
13564 /* MISC_ERR_STATUS read-only */
13565 write_csr(dd, MISC_ERR_MASK, 0);
13566 write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13567 /* MISC_ERR_FORCE leave alone */
13570 /* set TXE CSRs to chip reset defaults */
13571 static void reset_txe_csrs(struct hfi1_devdata *dd)
13578 write_csr(dd, SEND_CTRL, 0);
13579 __cm_reset(dd, 0); /* reset CM internal state */
13580 /* SEND_CONTEXTS read-only */
13581 /* SEND_DMA_ENGINES read-only */
13582 /* SEND_PIO_MEM_SIZE read-only */
13583 /* SEND_DMA_MEM_SIZE read-only */
13584 write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13585 pio_reset_all(dd); /* SEND_PIO_INIT_CTXT */
13586 /* SEND_PIO_ERR_STATUS read-only */
13587 write_csr(dd, SEND_PIO_ERR_MASK, 0);
13588 write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13589 /* SEND_PIO_ERR_FORCE leave alone */
13590 /* SEND_DMA_ERR_STATUS read-only */
13591 write_csr(dd, SEND_DMA_ERR_MASK, 0);
13592 write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13593 /* SEND_DMA_ERR_FORCE leave alone */
13594 /* SEND_EGRESS_ERR_STATUS read-only */
13595 write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13596 write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13597 /* SEND_EGRESS_ERR_FORCE leave alone */
13598 write_csr(dd, SEND_BTH_QP, 0);
13599 write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13600 write_csr(dd, SEND_SC2VLT0, 0);
13601 write_csr(dd, SEND_SC2VLT1, 0);
13602 write_csr(dd, SEND_SC2VLT2, 0);
13603 write_csr(dd, SEND_SC2VLT3, 0);
13604 write_csr(dd, SEND_LEN_CHECK0, 0);
13605 write_csr(dd, SEND_LEN_CHECK1, 0);
13606 /* SEND_ERR_STATUS read-only */
13607 write_csr(dd, SEND_ERR_MASK, 0);
13608 write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13609 /* SEND_ERR_FORCE read-only */
13610 for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13611 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13612 for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13613 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13614 for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13615 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13616 for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13617 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13618 for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13619 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13620 write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13621 write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13622 /* SEND_CM_CREDIT_USED_STATUS read-only */
13623 write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13624 write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13625 write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13626 write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13627 write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13628 for (i = 0; i < TXE_NUM_DATA_VL; i++)
13629 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13630 write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13631 /* SEND_CM_CREDIT_USED_VL read-only */
13632 /* SEND_CM_CREDIT_USED_VL15 read-only */
13633 /* SEND_EGRESS_CTXT_STATUS read-only */
13634 /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13635 write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13636 /* SEND_EGRESS_ERR_INFO read-only */
13637 /* SEND_EGRESS_ERR_SOURCE read-only */
13640 * TXE Per-Context CSRs
13642 for (i = 0; i < chip_send_contexts(dd); i++) {
13643 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13644 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13645 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13646 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13647 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13648 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13649 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13650 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13651 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13652 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13653 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13654 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13658 * TXE Per-SDMA CSRs
13660 for (i = 0; i < chip_sdma_engines(dd); i++) {
13661 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13662 /* SEND_DMA_STATUS read-only */
13663 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13664 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13665 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13666 /* SEND_DMA_HEAD read-only */
13667 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13668 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13669 /* SEND_DMA_IDLE_CNT read-only */
13670 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13671 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13672 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13673 /* SEND_DMA_ENG_ERR_STATUS read-only */
13674 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13675 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13676 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13677 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13678 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13679 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13680 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13681 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13682 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13683 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13689 * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13691 static void init_rbufs(struct hfi1_devdata *dd)
13697 * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are clear.
13702 reg = read_csr(dd, RCV_STATUS);
13703 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13704 | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13707 * Give up after 1ms - maximum wait time.
13709 * RBuf size is 136 KiB. Slowest possible is PCIe Gen1 x1 at
13710 * 250 MB/s bandwidth. Lower the rate to 66% for overhead to get:
13711 * 136 KiB / (66% * 250 MB/s) = 844 us
13713 if (count++ > 500) {
13715 "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13719 udelay(2); /* do not busy-wait the CSR */
13722 /* start the init - expect RcvCtrl to be 0 */
13723 write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13726 * Read to force the write of RcvCtrl.RxRbufInit. There is a brief
13727 * period after the write before RcvStatus.RxRbufInitDone is valid.
13728 * The delay in the first run through the loop below is sufficient and
13729 * required before the first read of RcvStatus.RxRbufInitDone.
13731 read_csr(dd, RCV_CTRL);
13733 /* wait for the init to finish */
13736 /* delay is required first time through - see above */
13737 udelay(2); /* do not busy-wait the CSR */
13738 reg = read_csr(dd, RCV_STATUS);
13739 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13742 /* give up after 100us - slowest possible at 33MHz is 73us */
13743 if (count++ > 50) {
13745 "%s: RcvStatus.RxRbufInit not set, continuing\n",
13752 /* set RXE CSRs to chip reset defaults */
13753 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13760 write_csr(dd, RCV_CTRL, 0);
13762 /* RCV_STATUS read-only */
13763 /* RCV_CONTEXTS read-only */
13764 /* RCV_ARRAY_CNT read-only */
13765 /* RCV_BUF_SIZE read-only */
13766 write_csr(dd, RCV_BTH_QP, 0);
13767 write_csr(dd, RCV_MULTICAST, 0);
13768 write_csr(dd, RCV_BYPASS, 0);
13769 write_csr(dd, RCV_VL15, 0);
13770 /* this is a clear-down */
13771 write_csr(dd, RCV_ERR_INFO,
13772 RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13773 /* RCV_ERR_STATUS read-only */
13774 write_csr(dd, RCV_ERR_MASK, 0);
13775 write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13776 /* RCV_ERR_FORCE leave alone */
13777 for (i = 0; i < 32; i++)
13778 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13779 for (i = 0; i < 4; i++)
13780 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13781 for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13782 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13783 for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13784 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13785 for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13786 clear_rsm_rule(dd, i);
13787 for (i = 0; i < 32; i++)
13788 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13791 * RXE Kernel and User Per-Context CSRs
13793 for (i = 0; i < chip_rcv_contexts(dd); i++) {
13795 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13796 /* RCV_CTXT_STATUS read-only */
13797 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13798 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13799 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13800 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13801 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13802 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13803 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13804 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13805 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13806 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13809 /* RCV_HDR_TAIL read-only */
13810 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13811 /* RCV_EGR_INDEX_TAIL read-only */
13812 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13813 /* RCV_EGR_OFFSET_TAIL read-only */
13814 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13815 write_uctxt_csr(dd, i,
13816 RCV_TID_FLOW_TABLE + (8 * j), 0);
13822 * Set sc2vl tables.
13824 * They power on to zeros, so to avoid send context errors
13825 * they need to be set:
13827 * SC 0-7 -> VL 0-7 (respectively)
13832 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13835 /* init per architecture spec, constrained by hardware capability */
13837 /* HFI maps sent packets */
13838 write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13844 write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13850 write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13856 write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13863 /* DC maps received packets */
13864 write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13866 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7,
13867 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13868 write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13870 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13871 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13873 /* initialize the cached sc2vl values consistently with h/w */
13874 for (i = 0; i < 32; i++) {
13875 if (i < 8 || i == 15)
13876 *((u8 *)(dd->sc2vl) + i) = (u8)i;
13878 *((u8 *)(dd->sc2vl) + i) = 0;
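/*
 * Editor's sketch: the cached-default loop above implements the mapping
 * described in the comment block (SC 0-7 -> VL 0-7, SC 15 -> VL 15,
 * everything else -> VL 0). The helper name default_sc_to_vl() is
 * hypothetical, shown only to make that rule explicit.
 */
static inline u8 default_sc_to_vl(u8 sc)
{
	return (sc < 8 || sc == 15) ? sc : 0;
}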
13883 * Read chip sizes and then reset parts to sane, disabled, values. We cannot
13884 * depend on the chip going through a power-on reset - a driver may be loaded
13885 * and unloaded many times.
13887 * Do not write any CSR values to the chip in this routine - there may be
13888 * a reset following the (possible) FLR in this routine.
13891 static int init_chip(struct hfi1_devdata *dd)
13897 * Put the HFI CSRs in a known state.
13898 * Combine this with a DC reset.
13900 * Stop the device from doing anything while we do a
13901 * reset. We know there are no other active users of
13902 * the device since we are now in charge. Turn off
13903 * all outbound and inbound traffic and make sure
13904 * the device does not generate any interrupts.
13907 /* disable send contexts and SDMA engines */
13908 write_csr(dd, SEND_CTRL, 0);
13909 for (i = 0; i < chip_send_contexts(dd); i++)
13910 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13911 for (i = 0; i < chip_sdma_engines(dd); i++)
13912 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13913 /* disable port (turn off RXE inbound traffic) and contexts */
13914 write_csr(dd, RCV_CTRL, 0);
13915 for (i = 0; i < chip_rcv_contexts(dd); i++)
13916 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13917 /* mask all interrupt sources */
13918 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13919 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13922 * DC Reset: do a full DC reset before the register clear.
13923 * A recommended length of time to hold is one CSR read,
13924 * so reread the CceDcCtrl. Then, hold the DC in reset
13925 * across the clear.
13927 write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13928 (void)read_csr(dd, CCE_DC_CTRL);
13932 * A FLR will reset the SPC core and part of the PCIe.
13933 * The parts that need to be restored have already been saved.
13936 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13938 /* do the FLR, the DC reset will remain */
13939 pcie_flr(dd->pcidev);
13941 /* restore command and BARs */
13942 ret = restore_pci_variables(dd);
13944 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13950 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13951 pcie_flr(dd->pcidev);
13952 ret = restore_pci_variables(dd);
13954 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13960 dd_dev_info(dd, "Resetting CSRs with writes\n");
13961 reset_cce_csrs(dd);
13962 reset_txe_csrs(dd);
13963 reset_rxe_csrs(dd);
13964 reset_misc_csrs(dd);
13966 /* clear the DC reset */
13967 write_csr(dd, CCE_DC_CTRL, 0);
13969 /* Set the LED off */
13973 * Clear the QSFP reset.
13974 * An FLR enforces a 0 on all out pins. The driver does not touch
13975 * ASIC_QSFPn_OUT otherwise. This leaves RESET_N low and
13976 * anything plugged in held constantly in reset, if it pays attention to RESET_N.
13978 * Prime examples of this are optical cables. Set all pins high.
13979 * I2CCLK and I2CDAT will change per direction, and INT_N and
13980 * MODPRS_N are input only and their value is ignored.
13982 write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13983 write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13984 init_chip_resources(dd);
13988 static void init_early_variables(struct hfi1_devdata *dd)
13992 /* assign link credit variables */
13994 dd->link_credits = CM_GLOBAL_CREDITS;
13996 dd->link_credits--;
13997 dd->vcu = cu_to_vcu(hfi1_cu);
13998 /* enough room for 8 MAD packets plus header - 17K */
13999 dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14000 if (dd->vl15_init > dd->link_credits)
14001 dd->vl15_init = dd->link_credits;
14003 write_uninitialized_csrs_and_memories(dd);
14005 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14006 for (i = 0; i < dd->num_pports; i++) {
14007 struct hfi1_pportdata *ppd = &dd->pport[i];
14009 set_partition_keys(ppd);
14011 init_sc2vl_tables(dd);
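/*
 * Editor's worked example of the VL15 credit math above (AU size
 * assumed): with vau encoding a 64-byte allocation unit,
 * 8 * (2048 + 128) = 17408 bytes works out to 17408 / 64 = 272 AUs of
 * dedicated VL15 credit, then capped at link_credits. A sketch, with a
 * hypothetical helper name:
 */
static inline u16 example_vl15_credits(u16 link_credits, u8 vau)
{
	u32 need = (8 * (2048 + 128)) / vau_to_au(vau);

	return min_t(u32, need, link_credits);
}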
14014 static void init_kdeth_qp(struct hfi1_devdata *dd)
14016 /* user changed the KDETH_QP */
14017 if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14018 /* out of range or illegal value */
14019 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14022 if (kdeth_qp == 0) /* not set, or failed range check */
14023 kdeth_qp = DEFAULT_KDETH_QP;
14025 write_csr(dd, SEND_BTH_QP,
14026 (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14027 SEND_BTH_QP_KDETH_QP_SHIFT);
14029 write_csr(dd, RCV_BTH_QP,
14030 (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14031 RCV_BTH_QP_KDETH_QP_SHIFT);
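/*
 * Editor's note (illustrative, not in the driver): the value programmed
 * above is the top byte of a 24-bit QPN, so with e.g. kdeth_qp = 0x80
 * (an assumed example value) KDETH QPs look like 0x80xxxx. A sketch of
 * the match the hardware performs; the helper name is hypothetical.
 */
static inline bool qpn_has_kdeth_prefix(u32 qpn, uint prefix)
{
	return ((qpn >> 16) & 0xff) == (prefix & 0xff);
}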
14037 * @idx: index to read
14039 u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
14041 u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
14043 reg >>= (idx % 8) * 8;
14049 * @dd - device data
14050 * @first_ctxt - first context
14051 * @last_ctxt - last context
14053 * This routine sets the qpn mapping table that
14054 * is indexed by qpn[8:1].
14056 * The routine will round robin the 256 settings
14057 * from first_ctxt to last_ctxt.
14059 * The first/last looks ahead to having specialized
14060 * receive contexts for mgmt and bypass. Normal
14061 * verbs traffic is assumed to be on a range
14062 * of receive contexts.
14064 static void init_qpmap_table(struct hfi1_devdata *dd,
14069 u64 regno = RCV_QP_MAP_TABLE;
14071 u64 ctxt = first_ctxt;
14073 for (i = 0; i < 256; i++) {
14074 reg |= ctxt << (8 * (i % 8));
14076 if (ctxt > last_ctxt)
14079 write_csr(dd, regno, reg);
14085 add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14086 | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
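/*
 * Editor's sketch: combined with hfi1_get_qp_map() above, the loop just
 * shown means entry i of the 256-entry table (indexed by qpn[8:1])
 * resolves to a simple round-robin over the context range.
 * example_qpmap_entry() is a hypothetical restatement, not driver code.
 */
static inline u64 example_qpmap_entry(u64 first_ctxt, u64 last_ctxt,
				      u32 i)
{
	u64 nctxts = last_ctxt - first_ctxt + 1;

	return first_ctxt + (i % nctxts);
}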
14089 struct rsm_map_table {
14090 u64 map[NUM_MAP_REGS];
14094 struct rsm_rule_data {
14110 * Return an initialized RMT map table for users to fill in. OK if it
14111 * returns NULL, indicating no table.
14113 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14115 struct rsm_map_table *rmt;
14116 u8 rxcontext = is_ax(dd) ? 0 : 0xff; /* 0 is default if a0 ver. */
14118 rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14120 memset(rmt->map, rxcontext, sizeof(rmt->map));
14128 * Write the final RMT map table to the chip and free the table. OK if the table is NULL.
14131 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14132 struct rsm_map_table *rmt)
14137 /* write table to chip */
14138 for (i = 0; i < NUM_MAP_REGS; i++)
14139 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14142 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14147 * Add a receive side mapping rule.
14149 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14150 struct rsm_rule_data *rrd)
14152 write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14153 (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14154 1ull << rule_index | /* enable bit */
14155 (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14156 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14157 (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14158 (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14159 (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14160 (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14161 (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14162 (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14163 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14164 (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14165 (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14166 (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14167 (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14171 * Clear a receive side mapping rule.
14173 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14175 write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14176 write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14177 write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14180 /* return the number of RSM map table entries that will be used for QOS */
14181 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14188 /* is QOS active at all? */
14189 if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14194 /* determine bits for qpn */
14195 for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14196 if (krcvqs[i] > max_by_vl)
14197 max_by_vl = krcvqs[i];
14198 if (max_by_vl > 32)
14200 m = ilog2(__roundup_pow_of_two(max_by_vl));
14202 /* determine bits for vl */
14203 n = ilog2(__roundup_pow_of_two(num_vls));
14205 /* reject if too much is used */
14214 return 1 << (m + n);
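/*
 * Editor's worked example for qos_rmt_entries() above (values assumed):
 * with num_vls = 8 and a largest per-VL krcvqs[] value of 4, m = 2 qpn
 * bits and n = 3 vl bits, so the rule needs 1 << (2 + 3) = 32 map
 * entries. A restatement of the core computation, names hypothetical:
 */
static inline int example_qos_rmt_entries(unsigned int max_by_vl,
					  unsigned int vls)
{
	unsigned int m = ilog2(__roundup_pow_of_two(max_by_vl));
	unsigned int n = ilog2(__roundup_pow_of_two(vls));

	return 1 << (m + n);
}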
14225 * init_qos - init RX qos
14226 * @dd - device data
14227 * @rmt - RSM map table
14229 * This routine initializes Rule 0 and the RSM map table to implement
14230 * quality of service (qos).
14232 * If all of the limit tests succeed, qos is applied based on the array
14233 * interpretation of krcvqs where entry 0 is VL0.
14235 * The number of vl bits (n) and the number of qpn bits (m) are computed to
14236 * feed both the RSM map table and the single rule.
14238 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14240 struct rsm_rule_data rrd;
14241 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14242 unsigned int rmt_entries;
14247 rmt_entries = qos_rmt_entries(dd, &m, &n);
14248 if (rmt_entries == 0)
14250 qpns_per_vl = 1 << m;
14252 /* enough room in the map table? */
14253 rmt_entries = 1 << (m + n);
14254 if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14257 /* add qos entries to the RSM map table */
14258 for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14261 for (qpn = 0, tctxt = ctxt;
14262 krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14263 unsigned idx, regoff, regidx;
14265 /* generate the index the hardware will produce */
14266 idx = rmt->used + ((qpn << n) ^ i);
14267 regoff = (idx % 8) * 8;
14269 /* replace default with context number */
14270 reg = rmt->map[regidx];
14271 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14273 reg |= (u64)(tctxt++) << regoff;
14274 rmt->map[regidx] = reg;
14275 if (tctxt == ctxt + krcvqs[i])
14281 rrd.offset = rmt->used;
14283 rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14284 rrd.field2_off = LRH_SC_MATCH_OFFSET;
14285 rrd.index1_off = LRH_SC_SELECT_OFFSET;
14286 rrd.index1_width = n;
14287 rrd.index2_off = QPN_SELECT_OFFSET;
14288 rrd.index2_width = m + n;
14289 rrd.mask1 = LRH_BTH_MASK;
14290 rrd.value1 = LRH_BTH_VALUE;
14291 rrd.mask2 = LRH_SC_MASK;
14292 rrd.value2 = LRH_SC_VALUE;
14295 add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14297 /* mark RSM map entries as used */
14298 rmt->used += rmt_entries;
14299 /* map everything else to the mcast/err/vl15 context */
14300 init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14301 dd->qos_shift = n + 1;
14305 init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
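/*
 * Editor's sketch of the RSM index math in init_qos() above: the
 * hardware produces map index = offset + ((qpn << n) ^ vl), so the low
 * n bits select the VL and the remaining qpn bits walk that VL's
 * contexts. Hypothetical restatement, not driver code:
 */
static inline unsigned int example_qos_rmt_index(unsigned int offset,
						 unsigned int qpn,
						 unsigned int vl,
						 unsigned int n)
{
	return offset + ((qpn << n) ^ vl);
}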
14308 static void init_fecn_handling(struct hfi1_devdata *dd,
14309 struct rsm_map_table *rmt)
14311 struct rsm_rule_data rrd;
14313 int i, idx, regoff, regidx, start;
14317 if (HFI1_CAP_IS_KSET(TID_RDMA))
14318 /* Exclude context 0 */
14321 start = dd->first_dyn_alloc_ctxt;
14323 total_cnt = dd->num_rcv_contexts - start;
14325 /* there needs to be enough room in the map table */
14326 if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
14327 dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
14332 * RSM will extract the destination context as an index into the
14333 * map table. The destination contexts are a sequential block
14334 * in the range start...num_rcv_contexts-1 (inclusive).
14335 * Map entries are accessed as offset + extracted value. Adjust
14336 * the added offset so this sequence can be placed anywhere in
14337 * the table - as long as the entries themselves do not wrap.
14338 * There are only enough bits in offset for the table size, so
14339 * start with that to allow for a "negative" offset.
14341 offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
14343 for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
14345 /* replace with identity mapping */
14346 regoff = (idx % 8) * 8;
14348 reg = rmt->map[regidx];
14349 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14350 reg |= (u64)i << regoff;
14351 rmt->map[regidx] = reg;
14355 * For RSM intercept of Expected FECN packets:
14356 * o packet type 0 - expected
14357 * o match on F (bit 95), using select/match 1, and
14358 * o match on SH (bit 133), using select/match 2.
14360 * Use index 1 to extract the 8-bit receive context from DestQP
14361 * (start at bit 64). Use that as the RSM map table index.
14363 rrd.offset = offset;
14365 rrd.field1_off = 95;
14366 rrd.field2_off = 133;
14367 rrd.index1_off = 64;
14368 rrd.index1_width = 8;
14369 rrd.index2_off = 0;
14370 rrd.index2_width = 0;
14377 add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14379 rmt->used += total_cnt;
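/*
 * Editor's check of the "negative" offset trick in init_fecn_handling()
 * above, assuming NUM_MAP_ENTRIES is 256 so 8-bit math wraps: for any
 * context ctx in [start, num_rcv_contexts), (offset + ctx) mod 256
 * equals rmt->used + (ctx - start), landing the block at rmt->used.
 */
static inline u8 example_fecn_map_index(u8 rmt_used, u8 start, u8 ctx)
{
	u8 offset = (u8)(NUM_MAP_ENTRIES + rmt_used - start);

	return (u8)(offset + ctx);	/* == rmt_used + (ctx - start) */
}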
14382 /* Initialize RSM for VNIC */
14383 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14389 struct rsm_rule_data rrd;
14391 if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14392 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14393 dd->vnic.rmt_start);
14397 dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14398 dd->vnic.rmt_start,
14399 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14401 /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14402 regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14403 reg = read_csr(dd, regoff);
14404 for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14405 /* Update map register with vnic context */
14406 j = (dd->vnic.rmt_start + i) % 8;
14407 reg &= ~(0xffllu << (j * 8));
14408 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14409 /* Wrap up vnic ctx index */
14410 ctx_id %= dd->vnic.num_ctxt;
14411 /* Write back map register */
14412 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14413 dev_dbg(&(dd)->pcidev->dev,
14414 "Vnic rsm map reg[%d] =0x%llx\n",
14415 regoff - RCV_RSM_MAP_TABLE, reg);
14417 write_csr(dd, regoff, reg);
14419 if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14420 reg = read_csr(dd, regoff);
14424 /* Add rule for vnic */
14425 rrd.offset = dd->vnic.rmt_start;
14427 /* Match 16B packets */
14428 rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14429 rrd.mask1 = L2_TYPE_MASK;
14430 rrd.value1 = L2_16B_VALUE;
14431 /* Match ETH L4 packets */
14432 rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14433 rrd.mask2 = L4_16B_TYPE_MASK;
14434 rrd.value2 = L4_16B_ETH_VALUE;
14435 /* Calc context from veswid and entropy */
14436 rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14437 rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14438 rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14439 rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14440 add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14442 /* Enable RSM if not already enabled */
14443 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14446 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14448 clear_rsm_rule(dd, RSM_INS_VNIC);
14450 /* Disable RSM if used only by vnic */
14451 if (dd->vnic.rmt_start == 0)
14452 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14455 static int init_rxe(struct hfi1_devdata *dd)
14457 struct rsm_map_table *rmt;
14460 /* enable all receive errors */
14461 write_csr(dd, RCV_ERR_MASK, ~0ull);
14463 rmt = alloc_rsm_map_table(dd);
14467 /* set up QOS, including the QPN map table */
14469 init_fecn_handling(dd, rmt);
14470 complete_rsm_map_table(dd, rmt);
14471 /* record number of used rsm map entries for vnic */
14472 dd->vnic.rmt_start = rmt->used;
14476 * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14477 * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14478 * space, PciCfgCap2.MaxPayloadSize in HFI). There is only one
14479 * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14480 * Max_Payload_Size set to its minimum of 128.
14482 * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14483 * (64 bytes). Max_Payload_Size is possibly modified upward in
14484 * tune_pcie_caps() which is called after this routine.
14487 /* Have 16 bytes (4DW) of bypass header available in header queue */
14488 val = read_csr(dd, RCV_BYPASS);
14489 val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14490 val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14491 RCV_BYPASS_HDR_SIZE_SHIFT);
14492 write_csr(dd, RCV_BYPASS, val);
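/*
 * Editor's sketch: the RCV_BYPASS update above is the usual CSR
 * read-modify-write of a single field. A generic form, with a
 * hypothetical helper name, purely for illustration:
 */
static void example_csr_set_field(struct hfi1_devdata *dd, u32 csr,
				  u64 smask, u64 mask, u32 shift,
				  u64 value)
{
	u64 reg = read_csr(dd, csr);

	reg &= ~smask;			/* clear the field */
	reg |= (value & mask) << shift;	/* install the new value */
	write_csr(dd, csr, reg);
}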
14496 static void init_other(struct hfi1_devdata *dd)
14498 /* enable all CCE errors */
14499 write_csr(dd, CCE_ERR_MASK, ~0ull);
14500 /* enable *some* Misc errors */
14501 write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14502 /* enable all DC errors, except LCB */
14503 write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14504 write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14508 * Fill out the given AU table using the given CU. A CU is defined in terms
14509 * of AUs. The table is an encoding: given the index, how many AUs does that
14512 * NOTE: Assumes that the register layout is the same for the
14513 * local and remote tables.
14515 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14516 u32 csr0to3, u32 csr4to7)
14518 write_csr(dd, csr0to3,
14519 0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14520 1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14522 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14524 SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14525 write_csr(dd, csr4to7,
14527 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14529 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14531 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14533 SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
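/*
 * Editor's note: the table2-7 multipliers are elided above; the pattern
 * assumed here (an editor's assumption, inferred from entries 0 and 1)
 * is table[0] = 0, table[1] = 1, then 2*cu, 4*cu, ... 64*cu. A
 * hypothetical restatement of that encoding:
 */
static inline u64 example_au_table_entry(u32 cu, u32 i)
{
	if (i == 0)
		return 0;
	if (i == 1)
		return 1;
	return (u64)cu << (i - 1);	/* 2*cu, 4*cu, ..., 64*cu */
}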
14536 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14538 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14539 SEND_CM_LOCAL_AU_TABLE4_TO7);
14542 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14544 assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14545 SEND_CM_REMOTE_AU_TABLE4_TO7);
14548 static void init_txe(struct hfi1_devdata *dd)
14552 /* enable all PIO, SDMA, general, and Egress errors */
14553 write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14554 write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14555 write_csr(dd, SEND_ERR_MASK, ~0ull);
14556 write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14558 /* enable all per-context and per-SDMA engine errors */
14559 for (i = 0; i < chip_send_contexts(dd); i++)
14560 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14561 for (i = 0; i < chip_sdma_engines(dd); i++)
14562 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14564 /* set the local CU to AU mapping */
14565 assign_local_cm_au_table(dd, dd->vcu);
14568 * Set reasonable default for Credit Return Timer
14569 * Don't set on Simulator - causes it to choke.
14571 if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14572 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14575 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14581 if (!rcd || !rcd->sc)
14584 hw_ctxt = rcd->sc->hw_context;
14585 reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14586 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14587 SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14588 /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14589 if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14590 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14591 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14593 * Enable send-side J_KEY integrity check, unless this is A0 h/w
14596 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14597 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14598 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14601 /* Enable J_KEY check on receive context. */
14602 reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14603 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14604 RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14605 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14610 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14615 if (!rcd || !rcd->sc)
14618 hw_ctxt = rcd->sc->hw_context;
14619 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14621 * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14622 * This check would not have been enabled for A0 h/w, see
14626 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14627 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14628 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14630 /* Turn off the J_KEY on the receive side */
14631 write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14636 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14642 if (!rcd || !rcd->sc)
14645 hw_ctxt = rcd->sc->hw_context;
14646 reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14647 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14648 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14649 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14650 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14651 reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14652 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14657 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14662 if (!ctxt || !ctxt->sc)
14665 hw_ctxt = ctxt->sc->hw_context;
14666 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14667 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14668 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14669 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14675 * Start doing the clean up of the chip. Our clean up happens in multiple
14676 * stages and this is just the first.
14678 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14683 finish_chip_resources(dd);
14686 #define HFI_BASE_GUID(dev) \
14687 ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14690 * Information can be shared between the two HFIs on the same ASIC
14691 * in the same OS. This function finds the peer device and sets
14692 * up a shared structure.
14694 static int init_asic_data(struct hfi1_devdata *dd)
14696 unsigned long index;
14697 struct hfi1_devdata *peer;
14698 struct hfi1_asic_data *asic_data;
14701 /* pre-allocate the asic structure in case we are the first device */
14702 asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14706 xa_lock_irq(&hfi1_dev_table);
14707 /* Find our peer device */
14708 xa_for_each(&hfi1_dev_table, index, peer) {
14709 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
14710 dd->unit != peer->unit)
14715 /* use already allocated structure */
14716 dd->asic_data = peer->asic_data;
14719 dd->asic_data = asic_data;
14720 mutex_init(&dd->asic_data->asic_resource_mutex);
14722 dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14723 xa_unlock_irq(&hfi1_dev_table);
14725 /* first one through - set up i2c devices */
14727 ret = set_up_i2c(dd, dd->asic_data);
14733 * Set dd->boardname. Use a generic name if a name is not returned from
14734 * EFI variable space.
14736 * Return 0 on success, -ENOMEM if space could not be allocated.
14738 static int obtain_boardname(struct hfi1_devdata *dd)
14740 /* generic board description */
14741 const char generic[] =
14742 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14743 unsigned long size;
14746 ret = read_hfi1_efi_var(dd, "description", &size,
14747 (void **)&dd->boardname);
14749 dd_dev_info(dd, "Board description not found\n");
14750 /* use generic description */
14751 dd->boardname = kstrdup(generic, GFP_KERNEL);
14752 if (!dd->boardname)
14759 * Check the interrupt registers to make sure that they are mapped correctly.
14760 * It is intended to help the user identify any mismapping by the VMM when the driver
14761 * is running in a VM. This function should only be called before interrupt
14762 * is set up properly.
14764 * Return 0 on success, -EINVAL on failure.
14766 static int check_int_registers(struct hfi1_devdata *dd)
14769 u64 all_bits = ~(u64)0;
14772 /* Clear CceIntMask[0] to avoid raising any interrupts */
14773 mask = read_csr(dd, CCE_INT_MASK);
14774 write_csr(dd, CCE_INT_MASK, 0ull);
14775 reg = read_csr(dd, CCE_INT_MASK);
14779 /* Clear all interrupt status bits */
14780 write_csr(dd, CCE_INT_CLEAR, all_bits);
14781 reg = read_csr(dd, CCE_INT_STATUS);
14785 /* Set all interrupt status bits */
14786 write_csr(dd, CCE_INT_FORCE, all_bits);
14787 reg = read_csr(dd, CCE_INT_STATUS);
14788 if (reg != all_bits)
14791 /* Restore the interrupt mask */
14792 write_csr(dd, CCE_INT_CLEAR, all_bits);
14793 write_csr(dd, CCE_INT_MASK, mask);
14797 write_csr(dd, CCE_INT_MASK, mask);
14798 dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14803 * hfi1_init_dd() - Initialize most of the dd structure.
14804 * @dd: the hfi1_devdata structure to initialize
14807 * This is global, and is called directly at init to set up the
14808 * chip-specific function pointers for later use.
14810 int hfi1_init_dd(struct hfi1_devdata *dd)
14812 struct pci_dev *pdev = dd->pcidev;
14813 struct hfi1_pportdata *ppd;
14816 static const char * const inames[] = { /* implementation names */
14818 "RTL VCS simulation",
14819 "RTL FPGA emulation",
14820 "Functional simulator"
14822 struct pci_dev *parent = pdev->bus->self;
14823 u32 sdma_engines = chip_sdma_engines(dd);
14826 for (i = 0; i < dd->num_pports; i++, ppd++) {
14828 /* init common fields */
14829 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14830 /* DC supports 4 link widths */
14831 ppd->link_width_supported =
14832 OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14833 OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14834 ppd->link_width_downgrade_supported =
14835 ppd->link_width_supported;
14836 /* start out enabling only 4X */
14837 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14838 ppd->link_width_downgrade_enabled =
14839 ppd->link_width_downgrade_supported;
14840 /* link width active is 0 when link is down */
14841 /* link width downgrade active is 0 when link is down */
14843 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14844 num_vls > HFI1_MAX_VLS_SUPPORTED) {
14845 dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
14846 num_vls, HFI1_MAX_VLS_SUPPORTED);
14847 num_vls = HFI1_MAX_VLS_SUPPORTED;
14849 ppd->vls_supported = num_vls;
14850 ppd->vls_operational = ppd->vls_supported;
14851 /* Set the default MTU. */
14852 for (vl = 0; vl < num_vls; vl++)
14853 dd->vld[vl].mtu = hfi1_max_mtu;
14854 dd->vld[15].mtu = MAX_MAD_PACKET;
14856 * Set the initial values to reasonable defaults; they will be set
14857 * for real when link is up.
14859 ppd->overrun_threshold = 0x4;
14860 ppd->phy_error_threshold = 0xf;
14861 ppd->port_crc_mode_enabled = link_crc_mask;
14862 /* initialize supported LTP CRC mode */
14863 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14864 /* initialize enabled LTP CRC mode */
14865 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14866 /* start in offline */
14867 ppd->host_link_state = HLS_DN_OFFLINE;
14868 init_vl_arb_caches(ppd);
14872 * Do remaining PCIe setup and save PCIe values in dd.
14873 * Any error printing is already done by the init code.
14874 * On return, we have the chip mapped.
14876 ret = hfi1_pcie_ddinit(dd, pdev);
14880 /* Save PCI space registers to rewrite after device reset */
14881 ret = save_pci_variables(dd);
14885 dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14886 & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14887 dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14888 & CCE_REVISION_CHIP_REV_MINOR_MASK;
14891 * Check interrupt registers mapping if the driver has no access to
14892 * the upstream component. In this case, it is likely that the driver
14893 * is running in a VM.
14896 ret = check_int_registers(dd);
14902 * obtain the hardware ID - NOT related to unit, which is a
14903 * software enumeration
14905 reg = read_csr(dd, CCE_REVISION2);
14906 dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14907 & CCE_REVISION2_HFI_ID_MASK;
14908 /* the variable size will remove unwanted bits */
14909 dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14910 dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14911 dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14912 dd->icode < ARRAY_SIZE(inames) ?
14913 inames[dd->icode] : "unknown", (int)dd->irev);
14915 /* speeds the hardware can support */
14916 dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14917 /* speeds allowed to run at */
14918 dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14919 /* give a reasonable active value, will be set on link up */
14920 dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14922 /* fix up link widths for emulation _p */
14924 if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14925 ppd->link_width_supported =
14926 ppd->link_width_enabled =
14927 ppd->link_width_downgrade_supported =
14928 ppd->link_width_downgrade_enabled =
14931 /* ensure num_vls isn't larger than number of sdma engines */
14932 if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
14933 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14934 num_vls, sdma_engines);
14935 num_vls = sdma_engines;
14936 ppd->vls_supported = sdma_engines;
14937 ppd->vls_operational = ppd->vls_supported;
14941 * Convert the ns parameter to the 64 * cclocks used in the CSR.
14942 * Limit the max if larger than the field holds. If timeout is
14943 * non-zero, then the calculated field will be at least 1.
14945 * Must be after icode is set up - the cclock rate depends
14946 * on knowing the hardware being used.
14948 dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14949 if (dd->rcv_intr_timeout_csr >
14950 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14951 dd->rcv_intr_timeout_csr =
14952 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14953 else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14954 dd->rcv_intr_timeout_csr = 1;
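/*
 * Editor's worked example for the conversion above, assuming a nominal
 * 500 MHz cclock (2 ns per cclock; an editor's assumption): the default
 * rcv_intr_timeout of 840 ns is 840 / 2 = 420 cclocks, and 420 / 64 = 6
 * is written to the RcvAvailTimeOut reload field; the clamp keeps any
 * nonzero request from rounding down to 0.
 */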
14956 /* needs to be done before we look for the peer device */
14959 /* set up shared ASIC data with peer device */
14960 ret = init_asic_data(dd);
14964 /* obtain chip sizes, reset chip CSRs */
14965 ret = init_chip(dd);
14969 /* read in the PCIe link speed information */
14970 ret = pcie_speeds(dd);
14974 /* call before get_platform_config(), after init_chip_resources() */
14975 ret = eprom_init(dd);
14977 goto bail_free_rcverr;
14979 /* Needs to be called before hfi1_firmware_init */
14980 get_platform_config(dd);
14982 /* read in firmware */
14983 ret = hfi1_firmware_init(dd);
14988 * In general, the PCIe Gen3 transition must occur after the
14989 * chip has been idled (so it won't initiate any PCIe transactions
14990 * e.g. an interrupt) and before the driver changes any registers
14991 * (the transition will reset the registers).
14993 * In particular, place this call after:
14994 * - init_chip() - the chip will not initiate any PCIe transactions
14995 * - pcie_speeds() - reads the current link speed
14996 * - hfi1_firmware_init() - the needed firmware is ready to be
14999 ret = do_pcie_gen3_transition(dd);
15004 * This should probably occur in hfi1_pcie_init(), but historically
15005 * occurs after the do_pcie_gen3_transition() code.
15007 tune_pcie_caps(dd);
15009 /* start setting dd values and adjusting CSRs */
15010 init_early_variables(dd);
15012 parse_platform_config(dd);
15014 ret = obtain_boardname(dd);
15018 snprintf(dd->boardversion, BOARD_VERS_MAX,
15019 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15020 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15023 (dd->revision >> CCE_REVISION_SW_SHIFT)
15024 & CCE_REVISION_SW_MASK);
15026 ret = set_up_context_variables(dd);
15030 /* set initial RXE CSRs */
15031 ret = init_rxe(dd);
15035 /* set initial TXE CSRs */
15037 /* set initial non-RXE, non-TXE CSRs */
15039 /* set up KDETH QP prefix in both RX and TX CSRs */
15042 ret = hfi1_dev_affinity_init(dd);
15046 /* send contexts must be set up before receive contexts */
15047 ret = init_send_contexts(dd);
15051 ret = hfi1_create_kctxts(dd);
15056 * Initialize aspm, to be done after gen3 transition and setting up
15057 * contexts and before enabling interrupts
15061 ret = init_pervl_scs(dd);
15066 for (i = 0; i < dd->num_pports; ++i) {
15067 ret = sdma_init(dd, i);
15072 /* use contexts created by hfi1_create_kctxts */
15073 ret = set_up_interrupts(dd);
15077 ret = hfi1_comp_vectors_set_up(dd);
15079 goto bail_clear_intr;
15081 /* set up LCB access - must be after set_up_interrupts() */
15082 init_lcb_access(dd);
15085 * Serial number is created from the base guid:
15086 * [27:24] = base guid [38:35]
15087 * [23: 0] = base guid [23: 0]
15089 snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15090 (dd->base_guid & 0xFFFFFF) |
15091 ((dd->base_guid >> 11) & 0xF000000));
15093 dd->oui1 = dd->base_guid >> 56 & 0xFF;
15094 dd->oui2 = dd->base_guid >> 48 & 0xFF;
15095 dd->oui3 = dd->base_guid >> 40 & 0xFF;
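/*
 * Editor's check of the bit-slicing above: (base_guid >> 11) & 0xF000000
 * moves GUID bits [38:35] down into serial bits [27:24] (38 - 11 = 27,
 * 35 - 11 = 24), while GUID bits [23:0] pass through unchanged. The OUI
 * bytes are simply the top three bytes of the base GUID.
 */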
15097 ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15099 goto bail_clear_intr;
15103 ret = init_cntrs(dd);
15105 goto bail_clear_intr;
15107 ret = init_rcverr(dd);
15109 goto bail_free_cntrs;
15111 init_completion(&dd->user_comp);
15113 /* The user refcount starts with one to indicate an active device */
15114 atomic_set(&dd->user_refcount, 1);
15123 hfi1_comp_vectors_clean_up(dd);
15124 msix_clean_up_interrupts(dd);
15126 hfi1_pcie_ddcleanup(dd);
15128 hfi1_free_devdata(dd);
15133 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15137 u32 current_egress_rate = ppd->current_egress_rate;
15138 /* rates here are in units of 10^6 bits/sec */
15140 if (desired_egress_rate == -1)
15141 return 0; /* shouldn't happen */
15143 if (desired_egress_rate >= current_egress_rate)
15144 return 0; /* we can't help go faster, only slower */
15146 delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15147 egress_cycles(dw_len * 4, current_egress_rate);
15149 return (u16)delta_cycles;
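/*
 * Editor's worked example for delay_cycles() above (rates assumed): for
 * a 1024-dword (4096-byte) packet, a desired static rate of 25000
 * Mbit/s against a current egress rate of 100000 Mbit/s returns
 * egress_cycles(4096, 25000) - egress_cycles(4096, 100000): the extra
 * cycles needed so the packet drains no faster than the requested rate.
 */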
15153 * create_pbc - build a pbc for transmission
15154 * @flags: special case flags or-ed in built pbc
15155 * @srate: static rate
15157 * @dwlen: dword length (header words + data words + pbc words)
15159 * Create a PBC with the given flags, rate, VL, and length.
15161 * NOTE: The PBC created will not insert any HCRC - all callers but one are
15162 * for verbs, which does not use this PSM feature. The lone other caller
15163 * is for the diagnostic interface which calls this if the user does not
15164 * supply their own PBC.
15166 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15169 u64 pbc, delay = 0;
15171 if (unlikely(srate_mbs))
15172 delay = delay_cycles(ppd, srate_mbs, dw_len);
15175 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15176 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15177 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15178 | (dw_len & PBC_LENGTH_DWS_MASK)
15179 << PBC_LENGTH_DWS_SHIFT;
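/*
 * Editor's usage sketch for create_pbc() (values assumed): a verbs-style
 * caller sends a packet of hdrwords header dwords plus nwords data
 * dwords, with 2 dwords of PBC and no static rate throttling:
 *
 *	u64 pbc = create_pbc(ppd, 0, 0, vl, hdrwords + nwords + 2);
 *
 * srate_mbs == 0 skips delay_cycles(), and as noted above this helper
 * always encodes PBC_IHCRC_NONE.
 */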
15184 #define SBUS_THERMAL 0x4f
15185 #define SBUS_THERM_MONITOR_MODE 0x1
15187 #define THERM_FAILURE(dev, ret, reason) \
15189 "Thermal sensor initialization failed: %s (%d)\n", \
15193 * Initialize the thermal sensor.
15195 * After initialization, enable polling of thermal sensor through
15196 * SBus interface. In order for this to work, the SBus Master
15197 * firmware has to be loaded because the HW polling
15198 * logic uses SBus interrupts, which are not supported with
15199 * default firmware. Otherwise, no data will be returned through
15200 * the ASIC_STS_THERM CSR.
15202 static int thermal_init(struct hfi1_devdata *dd)
15206 if (dd->icode != ICODE_RTL_SILICON ||
15207 check_chip_resource(dd, CR_THERM_INIT, NULL))
15210 ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15212 THERM_FAILURE(dd, ret, "Acquire SBus");
15216 dd_dev_info(dd, "Initializing thermal sensor\n");
15217 /* Disable polling of thermal readings */
15218 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15220 /* Thermal Sensor Initialization */
15221 /* Step 1: Reset the Thermal SBus Receiver */
15222 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15223 RESET_SBUS_RECEIVER, 0);
15225 THERM_FAILURE(dd, ret, "Bus Reset");
15228 /* Step 2: Set Reset bit in Thermal block */
15229 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15230 WRITE_SBUS_RECEIVER, 0x1);
15232 THERM_FAILURE(dd, ret, "Therm Block Reset");
15235 /* Step 3: Write clock divider value (100MHz -> 2MHz) */
15236 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15237 WRITE_SBUS_RECEIVER, 0x32);
15239 THERM_FAILURE(dd, ret, "Write Clock Div");
15242 /* Step 4: Select temperature mode */
15243 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15244 WRITE_SBUS_RECEIVER,
15245 SBUS_THERM_MONITOR_MODE);
15247 THERM_FAILURE(dd, ret, "Write Mode Sel");
15250 /* Step 5: De-assert block reset and start conversion */
15251 ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15252 WRITE_SBUS_RECEIVER, 0x2);
15254 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15257 /* Step 5.1: Wait for first conversion (21.5ms per spec) */
15260 /* Enable polling of thermal readings */
15261 write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15263 /* Set initialized flag */
15264 ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15266 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15269 release_chip_resource(dd, CR_SBUS);
15273 static void handle_temp_err(struct hfi1_devdata *dd)
15275 struct hfi1_pportdata *ppd = &dd->pport[0];
15277 * Thermal Critical Interrupt
15278 * Put the device into forced freeze mode, take link down to
15279 * offline, and put DC into reset.
15282 "Critical temperature reached! Forcing device into freeze mode!\n");
15283 dd->flags |= HFI1_FORCED_FREEZE;
15284 start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15286 * Shut DC down as much and as quickly as possible.
15288 * Step 1: Take the link down to OFFLINE. This will cause the
15289 * 8051 to put the Serdes in reset. However, we don't want to
15290 * go through the entire link state machine since we want to
15291 * shutdown ASAP. Furthermore, this is not a graceful shutdown
15292 * but rather an attempt to save the chip.
15293 * Code below is almost the same as quiet_serdes() but avoids
15294 * all the extra work and the sleeps.
15296 ppd->driver_link_ready = 0;
15297 ppd->link_enabled = 0;
15298 set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15301 * Step 2: Shutdown LCB and 8051
15302 * After shutdown, do not restore DC_CFG_RESET value.