/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains all of the code that is specific to the HFI chip
 */

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include "hfi.h"
#include "trace.h"
#include "mad.h"
#include "pio.h"
#include "sdma.h"
#include "eprom.h"
#include "efivar.h"
#include "platform.h"
#include "aspm.h"

#define NUM_IB_PORTS 1

uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");

uint num_vls = HFI1_MAX_VLS_SUPPORTED;
module_param(num_vls, uint, S_IRUGO);
MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");

/*
 * Default time to aggregate two 10K packets from the idle state
 * (timer not running). The timer starts at the end of the first packet,
 * so only the time for one 10K packet and header plus a bit extra is needed.
 * 10 * 1024 + 64 header bytes = 10304 bytes
 * 10304 bytes / 12.5 GB/s = 824.32 ns
 */
uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
module_param(rcv_intr_timeout, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");

uint rcv_intr_count = 16; /* same as qib */
module_param(rcv_intr_count, uint, S_IRUGO);
MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");

ushort link_crc_mask = SUPPORTED_CRCS;
module_param(link_crc_mask, ushort, S_IRUGO);
MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");

uint loopback;
module_param_named(loopback, loopback, uint, S_IRUGO);
MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");

/* Other driver tunables */
uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
static ushort crc_14b_sideband = 1;
static uint use_flr = 1;
uint quick_linkup; /* skip LNI */

struct flag_table {
        u64 flag;       /* the flag */
        char *str;      /* description string */
        u16 extra;      /* extra information */
        u16 unused0;
        u32 unused1;
};

/* str must be a string constant */
#define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
#define FLAG_ENTRY0(str, flag) {flag, str, 0}
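
/*
 * Illustrative sketch only (not part of the original flow): the tables
 * built with FLAG_ENTRY()/FLAG_ENTRY0() below are typically scanned like
 * this to turn a raw error CSR value into a readable string.  The name
 * and the buffer handling are assumptions for illustration; the driver's
 * real formatting helper may differ.
 */
static inline void example_decode_err_flags(u64 reg,
                                            const struct flag_table *table,
                                            int len, char *buf, size_t size)
{
        size_t used = 0;
        int i;

        /* append the description of every asserted bit, comma separated */
        for (i = 0; i < len && used < size; i++) {
                if (reg & table[i].flag)
                        used += scnprintf(buf + used, size - used, "%s%s",
                                          used ? "," : "", table[i].str);
        }
}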

/* Send Error Consequences */
#define SEC_WRITE_DROPPED       0x1
#define SEC_PACKET_DROPPED      0x2
#define SEC_SC_HALTED           0x4     /* per-context only */
#define SEC_SPC_FREEZE          0x8     /* per-HFI only */
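
/*
 * Hypothetical helper sketching how the SEC_* consequence bits are meant
 * to be consumed: the per-flag "extra" field in the flag tables below is
 * OR'd together for every asserted error bit, then acted on once.  The
 * actual recovery actions live elsewhere in this file.
 */
static inline int example_sec_consequences(u64 err_status,
                                           const struct flag_table *table,
                                           int len)
{
        int consequences = 0;
        int i;

        for (i = 0; i < len; i++)
                if (err_status & table[i].flag)
                        consequences |= table[i].extra;

        return consequences;    /* e.g. test SEC_SPC_FREEZE, SEC_SC_HALTED */
}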

#define MIN_KERNEL_KCTXTS         2
#define FIRST_KERNEL_KCTXT        1
/* sizes for both the QP and RSM map tables */
#define NUM_MAP_ENTRIES         256
#define NUM_MAP_REGS             32

/* Bit offset into the GUID which carries HFI id information */
#define GUID_HFI_INDEX_SHIFT     39

/* extract the emulation revision */
#define emulator_rev(dd) ((dd)->irev >> 8)
/* parallel and serial emulation versions are 3 and 4 respectively */
#define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
#define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)

/* RSM fields */

/* packet type */
#define IB_PACKET_TYPE         2ull
#define QW_SHIFT               6ull
/* QPN[7..1] */
#define QPN_WIDTH              7ull

/* LRH.BTH: QW 0, OFFSET 48 - for match */
#define LRH_BTH_QW             0ull
#define LRH_BTH_BIT_OFFSET     48ull
#define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
#define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
#define LRH_BTH_SELECT
#define LRH_BTH_MASK           3ull
#define LRH_BTH_VALUE          2ull

/* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
#define LRH_SC_QW              0ull
#define LRH_SC_BIT_OFFSET      56ull
#define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
#define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
#define LRH_SC_MASK            128ull
#define LRH_SC_VALUE           0ull

/* SC[n..0] QW 0, OFFSET 60 - for select */
#define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))

/* QPN[m+n:1] QW 1, OFFSET 1 */
#define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
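
/*
 * Worked example of the RSM offset encoding used above: an offset packs
 * a quad-word index in the bits above QW_SHIFT and a bit offset within
 * that QW in the low bits.  So LRH_BTH_MATCH_OFFSET = (0 << 6) | 48 = 48
 * (QW 0, bit 48), and QPN_SELECT_OFFSET = (1 << 6) | 1 = 65 (QW 1, bit 1).
 */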

/* defines to build power on SC2VL table */
#define SC2VL_VAL( \
        num, \
        sc0, sc0val, \
        sc1, sc1val, \
        sc2, sc2val, \
        sc3, sc3val, \
        sc4, sc4val, \
        sc5, sc5val, \
        sc6, sc6val, \
        sc7, sc7val) \
( \
        ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
        ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
        ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
        ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
        ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
        ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
        ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
        ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
)
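
/*
 * Example use of SC2VL_VAL (illustrative values, not necessarily the
 * driver's power-on defaults): program the first SendSC2VLt CSR so that
 * SC0..SC7 map to VL0..VL7.  Each argument pair is an SC index followed
 * by the VL value to place in that SC's field.
 *
 *	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
 *		0,
 *		0, 0, 1, 1,
 *		2, 2, 3, 3,
 *		4, 4, 5, 5,
 *		6, 6, 7, 7));
 */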

#define DC_SC_VL_VAL( \
        range, \
        e0, e0val, \
        e1, e1val, \
        e2, e2val, \
        e3, e3val, \
        e4, e4val, \
        e5, e5val, \
        e6, e6val, \
        e7, e7val, \
        e8, e8val, \
        e9, e9val, \
        e10, e10val, \
        e11, e11val, \
        e12, e12val, \
        e13, e13val, \
        e14, e14val, \
        e15, e15val) \
( \
        ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
        ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
        ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
        ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
        ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
        ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
        ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
        ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
        ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
        ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
        ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
        ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
        ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
        ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
        ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
        ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
)

/* all CceStatus sub-block freeze bits */
#define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
                        | CCE_STATUS_RXE_FROZE_SMASK \
                        | CCE_STATUS_TXE_FROZE_SMASK \
                        | CCE_STATUS_TXE_PIO_FROZE_SMASK)
/* all CceStatus sub-block TXE pause bits */
#define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
                        | CCE_STATUS_TXE_PAUSED_SMASK \
                        | CCE_STATUS_SDMA_PAUSED_SMASK)
/* all CceStatus sub-block RXE pause bits */
#define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
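
/*
 * Simplified sketch of how the ALL_FROZE mask is typically used during
 * SPC freeze handling: poll CceStatus until every sub-block reports
 * frozen, with a timeout as a backstop.  The 50 ms budget here is an
 * assumption for illustration; the driver's real freeze-wait logic may
 * use different limits and error handling.
 */
static inline void example_wait_all_froze(struct hfi1_devdata *dd)
{
        unsigned long timeout = jiffies + msecs_to_jiffies(50); /* assumed */
        u64 reg;

        while (1) {
                reg = read_csr(dd, CCE_STATUS);
                if ((reg & ALL_FROZE) == ALL_FROZE)
                        return;
                if (time_after(jiffies, timeout)) {
                        dd_dev_err(dd, "example: freeze wait timed out\n");
                        return;
                }
                udelay(2);
        }
}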

/*
 * CCE Error flags.
 */
static struct flag_table cce_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("CceCsrParityErr",
                CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
/* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
                CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
/* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
                CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
/* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
                CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
/* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
                CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
/* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
                CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
/* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
/* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
/*10*/  FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
/*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
            CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
/*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
                CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
/*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
                CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
/*14*/  FLAG_ENTRY0("PcicRetrySotMemCorErr",
                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
/*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
                CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
/*16*/  FLAG_ENTRY0("PcicPostDatQCorErr",
                CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
/*17*/  FLAG_ENTRY0("PcicCplHdQCorErr",
                CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
/*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
                CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
/*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
                CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
/*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
                CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
/*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
                CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
/*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
                CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
/*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
                CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
/*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
                CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
/*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
                CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
/*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
                CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
/*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
                CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
/*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
                CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
/*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
                CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
/*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
                CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
/*31*/  FLAG_ENTRY0("LATriggered",
                CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
/*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
                CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
/*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
                CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
/*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
                CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
/*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
                CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
/*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
                CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
/*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
                CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
/*38*/  FLAG_ENTRY0("CceIntMapCorErr",
                CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
/*39*/  FLAG_ENTRY0("CceIntMapUncErr",
                CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
/*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
                CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
/*41-63 reserved*/
};

/*
 * Misc Error flags
 */
#define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
static struct flag_table misc_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
/* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
/* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
/* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
/* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
/* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
/* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
/* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
/* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
/* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
/*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
/*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
/*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
};

/*
 * TXE PIO Error flags and consequences
 */
static struct flag_table pio_err_status_flags[] = {
/* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
        SEC_WRITE_DROPPED,
        SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
/* 1*/  FLAG_ENTRY("PioWriteAddrParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
/* 2*/  FLAG_ENTRY("PioCsrParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
/* 3*/  FLAG_ENTRY("PioSbMemFifo0",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
/* 4*/  FLAG_ENTRY("PioSbMemFifo1",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
/* 5*/  FLAG_ENTRY("PioPccFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
/* 6*/  FLAG_ENTRY("PioPecFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
/* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
/* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
/* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
/*10*/  FLAG_ENTRY("PioSmPktResetParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
/*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
/*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
/*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
        0,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
/*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
        0,
        SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
/*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
/*16*/  FLAG_ENTRY("PioPpmcPblFifo",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
/*17*/  FLAG_ENTRY("PioInitSmIn",
        0,
        SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
/*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
/*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
/*20*/  FLAG_ENTRY("PioHostAddrMemCor",
        0,
        SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
/*21*/  FLAG_ENTRY("PioWriteDataParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
/*22*/  FLAG_ENTRY("PioStateMachine",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
/*23*/  FLAG_ENTRY("PioWriteQwValidParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
/*24*/  FLAG_ENTRY("PioBlockQwCountParity",
        SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
/*25*/  FLAG_ENTRY("PioVlfVlLenParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
/*26*/  FLAG_ENTRY("PioVlfSopParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
/*27*/  FLAG_ENTRY("PioVlFifoParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
/*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
/*29*/  FLAG_ENTRY("PioPpmcSopLen",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
/*30-31 reserved*/
/*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
/*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
/*34*/  FLAG_ENTRY("PioPccSopHeadParity",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
/*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
        SEC_SPC_FREEZE,
        SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
/*36-63 reserved*/
};

/* TXE PIO errors that cause an SPC freeze */
#define ALL_PIO_FREEZE_ERR \
        (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
        | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)

/*
 * TXE SDMA Error flags
 */
static struct flag_table sdma_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
                SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
/* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
                SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
/* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
/* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
                SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
/*04-63 reserved*/
};

/* TXE SDMA errors that cause an SPC freeze */
#define ALL_SDMA_FREEZE_ERR  \
                (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
                | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
                | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)

/* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
#define PORT_DISCARD_EGRESS_ERRS \
        (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
        | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
        | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)

/*
 * TXE Egress Error flags
 */
#define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
static struct flag_table egress_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
/* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
/* 2 reserved */
/* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
                SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
/* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
/* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
/* 6 reserved */
/* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
                SEES(TX_PIO_LAUNCH_INTF_PARITY)),
/* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
                SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
/* 9-10 reserved */
/*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
                SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
/*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
/*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
/*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
/*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
/*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
                SEES(TX_SDMA0_DISALLOWED_PACKET)),
/*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
                SEES(TX_SDMA1_DISALLOWED_PACKET)),
/*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
                SEES(TX_SDMA2_DISALLOWED_PACKET)),
/*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
                SEES(TX_SDMA3_DISALLOWED_PACKET)),
/*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
                SEES(TX_SDMA4_DISALLOWED_PACKET)),
/*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
                SEES(TX_SDMA5_DISALLOWED_PACKET)),
/*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
                SEES(TX_SDMA6_DISALLOWED_PACKET)),
/*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
                SEES(TX_SDMA7_DISALLOWED_PACKET)),
/*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
                SEES(TX_SDMA8_DISALLOWED_PACKET)),
/*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
                SEES(TX_SDMA9_DISALLOWED_PACKET)),
/*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
                SEES(TX_SDMA10_DISALLOWED_PACKET)),
/*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
                SEES(TX_SDMA11_DISALLOWED_PACKET)),
/*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
                SEES(TX_SDMA12_DISALLOWED_PACKET)),
/*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
                SEES(TX_SDMA13_DISALLOWED_PACKET)),
/*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
                SEES(TX_SDMA14_DISALLOWED_PACKET)),
/*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
                SEES(TX_SDMA15_DISALLOWED_PACKET)),
/*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
                SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
/*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
                SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
/*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
                SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
/*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
                SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
/*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
                SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
/*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
                SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
/*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
                SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
/*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
                SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
/*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
                SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
/*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
/*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
/*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
/*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
/*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
/*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
/*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
/*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
/*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
/*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
/*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
/*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
/*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
/*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
/*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
/*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
/*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
/*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
/*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
/*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
/*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
/*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
                SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
/*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
                SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
};

/*
 * TXE Egress Error Info flags
 */
#define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
static struct flag_table egress_err_info_flags[] = {
/* 0*/  FLAG_ENTRY0("Reserved", 0ull),
/* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
/* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
/* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
/* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
/* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
/* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
/* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
/* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
/*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
/*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
/*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
/*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
/*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
/*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
/*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
/*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
/*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
/*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
/*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
/*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
};

/* TXE Egress errors that cause an SPC freeze */
#define ALL_TXE_EGRESS_FREEZE_ERR \
        (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
        | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
        | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
        | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
        | SEES(TX_LAUNCH_CSR_PARITY) \
        | SEES(TX_SBRD_CTL_CSR_PARITY) \
        | SEES(TX_CONFIG_PARITY) \
        | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
        | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
        | SEES(TX_CREDIT_RETURN_PARITY))

/*
 * TXE Send error flags
 */
#define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
static struct flag_table send_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
/* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
/* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
};

/*
 * TXE Send Context Error flags and consequences
 */
static struct flag_table sc_err_status_flags[] = {
/* 0*/  FLAG_ENTRY("InconsistentSop",
                SEC_PACKET_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
/* 1*/  FLAG_ENTRY("DisallowedPacket",
                SEC_PACKET_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
/* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
                SEC_WRITE_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
/* 3*/  FLAG_ENTRY("WriteOverflow",
                SEC_WRITE_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
/* 4*/  FLAG_ENTRY("WriteOutOfBounds",
                SEC_WRITE_DROPPED | SEC_SC_HALTED,
                SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
/* 5-63 reserved*/
};

/*
 * RXE Receive Error flags
 */
#define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
static struct flag_table rxe_err_status_flags[] = {
/* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
/* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
/* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
/* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
/* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
/* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
/* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
/* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
/* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
/* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
/*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
/*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
/*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
/*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
/*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
/*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
/*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
                RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
/*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
/*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
/*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
                RXES(RBUF_BLOCK_LIST_READ_UNC)),
/*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
                RXES(RBUF_BLOCK_LIST_READ_COR)),
/*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
                RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
/*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
                RXES(RBUF_CSR_QENT_CNT_PARITY)),
/*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
                RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
/*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
                RXES(RBUF_CSR_QVLD_BIT_PARITY)),
/*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
/*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
/*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
                RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
/*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
/*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
/*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
/*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
/*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
/*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
/*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
/*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
                RXES(RBUF_FL_INITDONE_PARITY)),
/*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
                RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
/*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
/*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
/*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
/*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
                RXES(LOOKUP_DES_PART1_UNC_COR)),
/*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
                RXES(LOOKUP_DES_PART2_PARITY)),
/*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
/*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
/*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
/*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
/*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
/*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
/*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
/*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
/*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
/*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
/*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
/*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
/*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
/*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
/*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
/*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
/*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
/*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
/*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
/*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
/*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
/*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
};

/* RXE errors that will trigger an SPC freeze */
#define ALL_RXE_FREEZE_ERR  \
        (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
        | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
        | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)

#define RXE_FREEZE_ABORT_MASK \
        (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
        RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)

/*
 * DCC Error Flags
 */
#define DCCE(name) DCC_ERR_FLG_##name##_SMASK
static struct flag_table dcc_err_flags[] = {
        FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
        FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
        FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
        FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
        FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
        FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
        FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
        FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
        FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
        FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
        FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
        FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
        FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
        FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
        FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
        FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
        FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
        FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
        FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
        FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
        FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
        FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
        FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
        FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
        FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
        FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
        FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
        FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
        FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
        FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
        FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
        FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
        FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
        FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
        FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
        FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
        FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
        FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
        FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
        FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
        FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
        FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
};

/*
 * LCB error flags
 */
#define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
static struct flag_table lcb_err_flags[] = {
/* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
/* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
/* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
/* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
                LCBE(ALL_LNS_FAILED_REINIT_TEST)),
/* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
/* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
/* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
/* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
/* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
/* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
/*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
/*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
/*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
/*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
                LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
/*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
/*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
/*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
/*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
/*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
/*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
                LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
/*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
/*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
/*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
/*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
/*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
/*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
/*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
                LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
/*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
/*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
                LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
/*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
                LCBE(REDUNDANT_FLIT_PARITY_ERR))
};

/*
 * DC8051 Error Flags
 */
#define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
static struct flag_table dc8051_err_flags[] = {
        FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
        FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
        FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
        FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
        FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
        FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
        FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
        FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
        FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
                    D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
        FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
};

/*
 * DC8051 Information Error flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
 */
static struct flag_table dc8051_info_err_flags[] = {
        FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
        FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
        FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
        FLAG_ENTRY0("Serdes internal loopback failure",
                    FAILED_SERDES_INTERNAL_LOOPBACK),
        FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
        FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
        FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
        FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
        FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
        FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
        FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
        FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
        FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT)
};

/*
 * DC8051 Information Host Information flags
 *
 * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
 */
static struct flag_table dc8051_info_host_msg_flags[] = {
        FLAG_ENTRY0("Host request done", 0x0001),
        FLAG_ENTRY0("BC SMA message", 0x0002),
        FLAG_ENTRY0("BC PWR_MGM message", 0x0004),
        FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
        FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
        FLAG_ENTRY0("External device config request", 0x0020),
        FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
        FLAG_ENTRY0("LinkUp achieved", 0x0080),
        FLAG_ENTRY0("Link going down", 0x0100),
};

static u32 encoded_size(u32 size);
static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
                               u8 *continuous);
static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
                                  u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
static void read_vc_remote_link_width(struct hfi1_devdata *dd,
                                      u8 *remote_tx_rate, u16 *link_widths);
static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
                                     u8 *flag_bits, u16 *link_widths);
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
                                  u8 *device_rev);
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
                            u8 *tx_polarity_inversion,
                            u8 *rx_polarity_inversion, u8 *max_rate);
static void handle_sdma_eng_err(struct hfi1_devdata *dd,
                                unsigned int context, u64 err_status);
static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
static void handle_dcc_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_lcb_err(struct hfi1_devdata *dd,
                           unsigned int context, u64 err_status);
static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
static void set_partition_keys(struct hfi1_pportdata *);
static const char *link_state_name(u32 state);
static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
                                          u32 state);
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
                           u64 *out_data);
static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
static int thermal_init(struct hfi1_devdata *dd);

static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
                                  int msecs);
static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
static void handle_temp_err(struct hfi1_devdata *);
static void dc_shutdown(struct hfi1_devdata *);
static void dc_start(struct hfi1_devdata *);
static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
                           unsigned int *np);
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);

/*
 * Error interrupt table entry.  This is used as input to the interrupt
 * "clear down" routine used for all second tier error interrupt
 * registers.  Second tier interrupt registers have a single bit
 * representing them in the top-level CceIntStatus.
 */
struct err_reg_info {
        u32 status;             /* status CSR offset */
        u32 clear;              /* clear CSR offset */
        u32 mask;               /* mask CSR offset */
        void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
        const char *desc;
};
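
/*
 * Simplified sketch of the "clear down" pattern that err_reg_info
 * entries feed (the real handler in this file also manages masking):
 * read the second-tier status CSR, write the value back to the clear
 * CSR to acknowledge it, then hand the asserted bits to the
 * per-register handler.
 */
static inline void example_clear_down(struct hfi1_devdata *dd,
                                      const struct err_reg_info *eri,
                                      u32 source)
{
        u64 reg = read_csr(dd, eri->status);

        write_csr(dd, eri->clear, reg); /* ack what we saw */
        if (reg && eri->handler)
                eri->handler(dd, source, reg);
}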

#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)

/*
 * Helpers for building HFI and DC error interrupt table entries.  Different
 * helpers are needed because of inconsistent register names.
 */
#define EE(reg, handler, desc) \
        { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
                handler, desc }
#define DC_EE1(reg, handler, desc) \
        { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
#define DC_EE2(reg, handler, desc) \
        { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
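
/*
 * For example, EE(CCE_ERR, handle_cce_err, "CceErr") expands to
 * { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK, handle_cce_err,
 *   "CceErr" }, while DC_EE1/DC_EE2 cover the DC blocks whose CSRs use
 * the _FLG/_FLG_CLR/_FLG_EN and _FLG/_CLR/_EN suffixes instead.
 */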
1071
1072 /*
1073  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1074  * another register containing more information.
1075  */
1076 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1077 /* 0*/  EE(CCE_ERR,             handle_cce_err,    "CceErr"),
1078 /* 1*/  EE(RCV_ERR,             handle_rxe_err,    "RxeErr"),
1079 /* 2*/  EE(MISC_ERR,    handle_misc_err,   "MiscErr"),
1080 /* 3*/  { 0, 0, 0, NULL }, /* reserved */
1081 /* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1082 /* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1083 /* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1084 /* 7*/  EE(SEND_ERR,    handle_txe_err,    "TxeErr")
1085         /* the rest are reserved */
1086 };
1087
1088 /*
1089  * Index into the Various section of the interrupt sources
1090  * corresponding to the Critical Temperature interrupt.
1091  */
1092 #define TCRIT_INT_SOURCE 4
1093
1094 /*
1095  * SDMA error interrupt entry - refers to another register containing more
1096  * information.
1097  */
1098 static const struct err_reg_info sdma_eng_err =
1099         EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1100
1101 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1102 /* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1103 /* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1104 /* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1105 /* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1106 /* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1107         /* rest are reserved */
1108 };
1109
1110 /*
1111  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1112  * register cannot be derived from the MTU value because 10K is not
1113  * a power of 2. Therefore, we need a constant. Everything else can
1114  * be calculated.
1115  */
1116 #define DCC_CFG_PORT_MTU_CAP_10240 7
1117
1118 /*
1119  * Table of the DC grouping of error interrupts.  Each entry refers to
1120  * another register containing more information.
1121  */
1122 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1123 /* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1124 /* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1125 /* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1126 /* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1127         /* the rest are reserved */
1128 };
1129
1130 struct cntr_entry {
1131         /*
1132          * counter name
1133          */
1134         char *name;
1135
1136         /*
1137          * csr to read for name (if applicable)
1138          */
1139         u64 csr;
1140
1141         /*
1142          * offset into dd or ppd to store the counter's value
1143          */
1144         int offset;
1145
1146         /*
1147          * flags
1148          */
1149         u8 flags;
1150
1151         /*
1152          * accessor for stat element, context either dd or ppd
1153          */
1154         u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1155                        int mode, u64 data);
1156 };
1157
1158 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1159 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1160
1161 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1162 { \
1163         name, \
1164         csr, \
1165         offset, \
1166         flags, \
1167         accessor \
1168 }
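
/*
 * The wrappers below fill CNTR_ELEM with a stringified name and a CSR
 * address computed as an array base plus 8 bytes per counter index.
 */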
1169
1170 /* 32bit RXE */
1171 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1172 CNTR_ELEM(#name, \
1173           (counter * 8 + RCV_COUNTER_ARRAY32), \
1174           0, flags | CNTR_32BIT, \
1175           port_access_u32_csr)
1176
1177 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1178 CNTR_ELEM(#name, \
1179           (counter * 8 + RCV_COUNTER_ARRAY32), \
1180           0, flags | CNTR_32BIT, \
1181           dev_access_u32_csr)
1182
1183 /* 64bit RXE */
1184 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1185 CNTR_ELEM(#name, \
1186           (counter * 8 + RCV_COUNTER_ARRAY64), \
1187           0, flags, \
1188           port_access_u64_csr)
1189
1190 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1191 CNTR_ELEM(#name, \
1192           (counter * 8 + RCV_COUNTER_ARRAY64), \
1193           0, flags, \
1194           dev_access_u64_csr)
1195
1196 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1197 #define OVR_ELM(ctx) \
1198 CNTR_ELEM("RcvHdrOvr" #ctx, \
1199           (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1200           0, CNTR_NORMAL, port_access_u64_csr)
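
/*
 * Expansion sketch: OVR_ELM(0) becomes
 *   CNTR_ELEM("RcvHdrOvr0", RCV_HDR_OVFL_CNT + 0 * 0x100, 0,
 *             CNTR_NORMAL, port_access_u64_csr)
 * i.e. one overflow counter per receive context, 0x100 bytes apart.
 */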
1201
1202 /* 32bit TXE */
1203 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1204 CNTR_ELEM(#name, \
1205           (counter * 8 + SEND_COUNTER_ARRAY32), \
1206           0, flags | CNTR_32BIT, \
1207           port_access_u32_csr)
1208
1209 /* 64bit TXE */
1210 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1211 CNTR_ELEM(#name, \
1212           (counter * 8 + SEND_COUNTER_ARRAY64), \
1213           0, flags, \
1214           port_access_u64_csr)
1215
1216 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1217 CNTR_ELEM(#name, \
1218           (counter * 8 + SEND_COUNTER_ARRAY64), \
1219           0, \
1220           flags, \
1221           dev_access_u64_csr)
1222
1223 /* CCE */
1224 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1225 CNTR_ELEM(#name, \
1226           (counter * 8 + CCE_COUNTER_ARRAY32), \
1227           0, flags | CNTR_32BIT, \
1228           dev_access_u32_csr)
1229
1230 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1231 CNTR_ELEM(#name, \
1232           (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1233           0, flags | CNTR_32BIT, \
1234           dev_access_u32_csr)
1235
1236 /* DC */
1237 #define DC_PERF_CNTR(name, counter, flags) \
1238 CNTR_ELEM(#name, \
1239           counter, \
1240           0, \
1241           flags, \
1242           dev_access_u64_csr)
1243
1244 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1245 CNTR_ELEM(#name, \
1246           counter, \
1247           0, \
1248           flags, \
1249           dc_access_lcb_cntr)
1250
1251 /* ibp counters */
1252 #define SW_IBP_CNTR(name, cntr) \
1253 CNTR_ELEM(#name, \
1254           0, \
1255           0, \
1256           CNTR_SYNTH, \
1257           access_ibp_##cntr)
1258
1259 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1260 {
1261         if (dd->flags & HFI1_PRESENT)
1262                 return readq((void __iomem *)dd->kregbase + offset);
1263         /* device not present: reads of absent hardware return all ones */
1264         return -1;
1265 }
1266
1267 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1268 {
1269         if (dd->flags & HFI1_PRESENT)
1270                 writeq(value, (void __iomem *)dd->kregbase + offset);
1271 }
1272
1273 /* return the mapped address of a CSR; unlike read_csr() and
1274  * write_csr(), this makes no HFI1_PRESENT check */
1275 void __iomem *get_csr_addr(struct hfi1_devdata *dd, u32 offset)
1276 {
1277         return (void __iomem *)dd->kregbase + offset;
1278 }
1279
1280 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1281                                  int mode, u64 value)
1282 {
1283         u64 ret;
1284
1285         if (mode == CNTR_MODE_R) {
1286                 ret = read_csr(dd, csr);
1287         } else if (mode == CNTR_MODE_W) {
1288                 write_csr(dd, csr, value);
1289                 ret = value;
1290         } else {
1291                 dd_dev_err(dd, "Invalid cntr register access mode");
1292                 return 0;
1293         }
1294
1295         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1296         return ret;
1297 }
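
/*
 * Usage sketch: counter accessors call this as
 *   val = read_write_csr(dd, csr, CNTR_MODE_R, 0);   (read)
 *   read_write_csr(dd, csr, CNTR_MODE_W, val);       (write)
 */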
1298
1299 /* Dev Access */
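/*
 * For CNTR_SDMA entries the vl argument is reused as the SDMA engine
 * index (engine CSRs are 0x100 bytes apart); all other device counters
 * reject a valid vl since they are not per-VL.
 */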
1300 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1301                               void *context, int vl, int mode, u64 data)
1302 {
1303         struct hfi1_devdata *dd = context;
1304         u64 csr = entry->csr;
1305
1306         if (entry->flags & CNTR_SDMA) {
1307                 if (vl == CNTR_INVALID_VL)
1308                         return 0;
1309                 csr += 0x100 * vl;
1310         } else {
1311                 if (vl != CNTR_INVALID_VL)
1312                         return 0;
1313         }
1314         return read_write_csr(dd, csr, mode, data);
1315 }
1316
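/*
 * The access_sde_* accessors return software counts kept in the
 * per-engine SDMA state: idx is the engine number; mode and data are
 * ignored since these counts are read-only through this interface.
 */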
1317 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1318                               void *context, int idx, int mode, u64 data)
1319 {
1320         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1321
1322         if (dd->per_sdma && idx < dd->num_sdma)
1323                 return dd->per_sdma[idx].err_cnt;
1324         return 0;
1325 }
1326
1327 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1328                               void *context, int idx, int mode, u64 data)
1329 {
1330         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1331
1332         if (dd->per_sdma && idx < dd->num_sdma)
1333                 return dd->per_sdma[idx].sdma_int_cnt;
1334         return 0;
1335 }
1336
1337 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1338                                    void *context, int idx, int mode, u64 data)
1339 {
1340         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1341
1342         if (dd->per_sdma && idx < dd->num_sdma)
1343                 return dd->per_sdma[idx].idle_int_cnt;
1344         return 0;
1345 }
1346
1347 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1348                                        void *context, int idx, int mode,
1349                                        u64 data)
1350 {
1351         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1352
1353         if (dd->per_sdma && idx < dd->num_sdma)
1354                 return dd->per_sdma[idx].progress_int_cnt;
1355         return 0;
1356 }
1357
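/*
 * Per-VL counters (CNTR_VL) are laid out as consecutive 64-bit CSRs,
 * hence the 8 * vl byte offset below.
 */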
1358 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1359                               int vl, int mode, u64 data)
1360 {
1361         struct hfi1_devdata *dd = context;
1362
1363         u64 val;
1364         u64 csr = entry->csr;
1365
1366         if (entry->flags & CNTR_VL) {
1367                 if (vl == CNTR_INVALID_VL)
1368                         return 0;
1369                 csr += 8 * vl;
1370         } else {
1371                 if (vl != CNTR_INVALID_VL)
1372                         return 0;
1373         }
1374
1375         val = read_write_csr(dd, csr, mode, data);
1376         return val;
1377 }
1378
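/*
 * LCB counters are reached through read_lcb_csr()/write_lcb_csr(),
 * which can fail when the LCB is not accessible; such accesses
 * report 0.
 */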
1379 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1380                               int vl, int mode, u64 data)
1381 {
1382         struct hfi1_devdata *dd = context;
1383         u32 csr = entry->csr;
1384         int ret = 0;
1385
1386         if (vl != CNTR_INVALID_VL)
1387                 return 0;
1388         if (mode == CNTR_MODE_R)
1389                 ret = read_lcb_csr(dd, csr, &data);
1390         else if (mode == CNTR_MODE_W)
1391                 ret = write_lcb_csr(dd, csr, data);
1392
1393         if (ret) {
1394                 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1395                 return 0;
1396         }
1397
1398         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1399         return data;
1400 }
1401
1402 /* Port Access */
1403 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1404                                int vl, int mode, u64 data)
1405 {
1406         struct hfi1_pportdata *ppd = context;
1407
1408         if (vl != CNTR_INVALID_VL)
1409                 return 0;
1410         return read_write_csr(ppd->dd, entry->csr, mode, data);
1411 }
1412
1413 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1414                                void *context, int vl, int mode, u64 data)
1415 {
1416         struct hfi1_pportdata *ppd = context;
1417         u64 val;
1418         u64 csr = entry->csr;
1419
1420         if (entry->flags & CNTR_VL) {
1421                 if (vl == CNTR_INVALID_VL)
1422                         return 0;
1423                 csr += 8 * vl;
1424         } else {
1425                 if (vl != CNTR_INVALID_VL)
1426                         return 0;
1427         }
1428         val = read_write_csr(ppd->dd, csr, mode, data);
1429         return val;
1430 }
1431
1432 /* Software defined */
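/*
 * Software counters follow the same mode contract as CSR counters:
 * CNTR_MODE_R returns the stored value, CNTR_MODE_W overwrites it.
 */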
1433 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1434                                 u64 data)
1435 {
1436         u64 ret;
1437
1438         if (mode == CNTR_MODE_R) {
1439                 ret = *cntr;
1440         } else if (mode == CNTR_MODE_W) {
1441                 *cntr = data;
1442                 ret = data;
1443         } else {
1444                 dd_dev_err(dd, "Invalid cntr sw access mode");
1445                 return 0;
1446         }
1447
1448         hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1449
1450         return ret;
1451 }
1452
1453 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1454                                  int vl, int mode, u64 data)
1455 {
1456         struct hfi1_pportdata *ppd = context;
1457
1458         if (vl != CNTR_INVALID_VL)
1459                 return 0;
1460         return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1461 }
1462
1463 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1464                                  int vl, int mode, u64 data)
1465 {
1466         struct hfi1_pportdata *ppd = context;
1467
1468         if (vl != CNTR_INVALID_VL)
1469                 return 0;
1470         return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1471 }
1472
1473 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1474                                        void *context, int vl, int mode,
1475                                        u64 data)
1476 {
1477         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1478
1479         if (vl != CNTR_INVALID_VL)
1480                 return 0;
1481         return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1482 }
1483
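/*
 * Transmit discards are tracked both as a port total
 * (vl == CNTR_INVALID_VL) and per VL; out-of-range VLs are routed to
 * a dummy counter so they read as 0.
 */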
1484 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1485                                    void *context, int vl, int mode, u64 data)
1486 {
1487         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1488         u64 zero = 0;
1489         u64 *counter;
1490
1491         if (vl == CNTR_INVALID_VL)
1492                 counter = &ppd->port_xmit_discards;
1493         else if (vl >= 0 && vl < C_VL_COUNT)
1494                 counter = &ppd->port_xmit_discards_vl[vl];
1495         else
1496                 counter = &zero;
1497
1498         return read_write_sw(ppd->dd, counter, mode, data);
1499 }
1500
1501 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1502                                        void *context, int vl, int mode,
1503                                        u64 data)
1504 {
1505         struct hfi1_pportdata *ppd = context;
1506
1507         if (vl != CNTR_INVALID_VL)
1508                 return 0;
1509
1510         return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1511                              mode, data);
1512 }
1513
1514 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1515                                       void *context, int vl, int mode, u64 data)
1516 {
1517         struct hfi1_pportdata *ppd = context;
1518
1519         if (vl != CNTR_INVALID_VL)
1520                 return 0;
1521
1522         return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1523                              mode, data);
1524 }
1525
1526 u64 get_all_cpu_total(u64 __percpu *cntr)
1527 {
1528         int cpu;
1529         u64 counter = 0;
1530
1531         for_each_possible_cpu(cpu)
1532                 counter += *per_cpu_ptr(cntr, cpu);
1533         return counter;
1534 }
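
/*
 * The sum is taken over all possible CPUs so contributions from CPUs
 * that have since gone offline are not lost.
 */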
1535
1536 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1537                           u64 __percpu *cntr,
1538                           int vl, int mode, u64 data)
1539 {
1540         u64 ret = 0;
1541
1542         if (vl != CNTR_INVALID_VL)
1543                 return 0;
1544
1545         if (mode == CNTR_MODE_R) {
1546                 ret = get_all_cpu_total(cntr) - *z_val;
1547         } else if (mode == CNTR_MODE_W) {
1548                 /* A write can only zero the counter */
1549                 if (data == 0)
1550                         *z_val = get_all_cpu_total(cntr);
1551                 else
1552                         dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1553         } else {
1554                 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1555                 return 0;
1556         }
1557
1558         return ret;
1559 }
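
/*
 * Zeroing sketch: writing 0 latches the current total into *z_val, so
 * later reads (total - *z_val) restart from zero; the per-CPU
 * variables themselves are never modified.
 */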
1560
1561 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1562                               void *context, int vl, int mode, u64 data)
1563 {
1564         struct hfi1_devdata *dd = context;
1565
1566         return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1567                               mode, data);
1568 }
1569
1570 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1571                                    void *context, int vl, int mode, u64 data)
1572 {
1573         struct hfi1_devdata *dd = context;
1574
1575         return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1576                               mode, data);
1577 }
1578
1579 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1580                               void *context, int vl, int mode, u64 data)
1581 {
1582         struct hfi1_devdata *dd = context;
1583
1584         return dd->verbs_dev.n_piowait;
1585 }
1586
1587 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1588                                void *context, int vl, int mode, u64 data)
1589 {
1590         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1591
1592         return dd->verbs_dev.n_piodrain;
1593 }
1594
1595 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1596                               void *context, int vl, int mode, u64 data)
1597 {
1598         struct hfi1_devdata *dd = context;
1599
1600         return dd->verbs_dev.n_txwait;
1601 }
1602
1603 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1604                                void *context, int vl, int mode, u64 data)
1605 {
1606         struct hfi1_devdata *dd = context;
1607
1608         return dd->verbs_dev.n_kmem_wait;
1609 }
1610
1611 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1612                                    void *context, int vl, int mode, u64 data)
1613 {
1614         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1615
1616         return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1617                               mode, data);
1618 }
1619
1620 /* Software counters for the error status bits within MISC_ERR_STATUS */
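/*
 * The array index matches the bit position of the error within
 * MISC_ERR_STATUS, e.g. dd->misc_err_status_cnt[12] backs the PLL
 * lock fail counter below.
 */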
1621 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1622                                              void *context, int vl, int mode,
1623                                              u64 data)
1624 {
1625         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1626
1627         return dd->misc_err_status_cnt[12];
1628 }
1629
1630 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1631                                           void *context, int vl, int mode,
1632                                           u64 data)
1633 {
1634         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1635
1636         return dd->misc_err_status_cnt[11];
1637 }
1638
1639 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1640                                                void *context, int vl, int mode,
1641                                                u64 data)
1642 {
1643         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1644
1645         return dd->misc_err_status_cnt[10];
1646 }
1647
1648 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1649                                                  void *context, int vl,
1650                                                  int mode, u64 data)
1651 {
1652         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1653
1654         return dd->misc_err_status_cnt[9];
1655 }
1656
1657 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1658                                            void *context, int vl, int mode,
1659                                            u64 data)
1660 {
1661         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1662
1663         return dd->misc_err_status_cnt[8];
1664 }
1665
1666 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1667                                 const struct cntr_entry *entry,
1668                                 void *context, int vl, int mode, u64 data)
1669 {
1670         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1671
1672         return dd->misc_err_status_cnt[7];
1673 }
1674
1675 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1676                                                 void *context, int vl,
1677                                                 int mode, u64 data)
1678 {
1679         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1680
1681         return dd->misc_err_status_cnt[6];
1682 }
1683
1684 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1685                                               void *context, int vl, int mode,
1686                                               u64 data)
1687 {
1688         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1689
1690         return dd->misc_err_status_cnt[5];
1691 }
1692
1693 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1694                                             void *context, int vl, int mode,
1695                                             u64 data)
1696 {
1697         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1698
1699         return dd->misc_err_status_cnt[4];
1700 }
1701
1702 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1703                                                  void *context, int vl,
1704                                                  int mode, u64 data)
1705 {
1706         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1707
1708         return dd->misc_err_status_cnt[3];
1709 }
1710
1711 static u64 access_misc_csr_write_bad_addr_err_cnt(
1712                                 const struct cntr_entry *entry,
1713                                 void *context, int vl, int mode, u64 data)
1714 {
1715         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1716
1717         return dd->misc_err_status_cnt[2];
1718 }
1719
1720 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1721                                                  void *context, int vl,
1722                                                  int mode, u64 data)
1723 {
1724         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1725
1726         return dd->misc_err_status_cnt[1];
1727 }
1728
1729 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1730                                           void *context, int vl, int mode,
1731                                           u64 data)
1732 {
1733         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1734
1735         return dd->misc_err_status_cnt[0];
1736 }
1737
1738 /*
1739  * Software counter for the aggregate of
1740  * individual CceErrStatus counters
1741  */
1742 static u64 access_sw_cce_err_status_aggregated_cnt(
1743                                 const struct cntr_entry *entry,
1744                                 void *context, int vl, int mode, u64 data)
1745 {
1746         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1747
1748         return dd->sw_cce_err_status_aggregate;
1749 }
1750
1751 /*
1752  * Software counters corresponding to each of the
1753  * error status bits within CceErrStatus
1754  */
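/*
 * As with the misc counters, the index into dd->cce_err_status_cnt[]
 * matches the error's bit position within CceErrStatus.
 */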
1755 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1756                                               void *context, int vl, int mode,
1757                                               u64 data)
1758 {
1759         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1760
1761         return dd->cce_err_status_cnt[40];
1762 }
1763
1764 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1765                                           void *context, int vl, int mode,
1766                                           u64 data)
1767 {
1768         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1769
1770         return dd->cce_err_status_cnt[39];
1771 }
1772
1773 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1774                                           void *context, int vl, int mode,
1775                                           u64 data)
1776 {
1777         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1778
1779         return dd->cce_err_status_cnt[38];
1780 }
1781
1782 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1783                                              void *context, int vl, int mode,
1784                                              u64 data)
1785 {
1786         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1787
1788         return dd->cce_err_status_cnt[37];
1789 }
1790
1791 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1792                                              void *context, int vl, int mode,
1793                                              u64 data)
1794 {
1795         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1796
1797         return dd->cce_err_status_cnt[36];
1798 }
1799
1800 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1801                                 const struct cntr_entry *entry,
1802                                 void *context, int vl, int mode, u64 data)
1803 {
1804         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1805
1806         return dd->cce_err_status_cnt[35];
1807 }
1808
1809 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1810                                 const struct cntr_entry *entry,
1811                                 void *context, int vl, int mode, u64 data)
1812 {
1813         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1814
1815         return dd->cce_err_status_cnt[34];
1816 }
1817
1818 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1819                                                  void *context, int vl,
1820                                                  int mode, u64 data)
1821 {
1822         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1823
1824         return dd->cce_err_status_cnt[33];
1825 }
1826
1827 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1828                                                 void *context, int vl, int mode,
1829                                                 u64 data)
1830 {
1831         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1832
1833         return dd->cce_err_status_cnt[32];
1834 }
1835
1836 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1837                                    void *context, int vl, int mode, u64 data)
1838 {
1839         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1840
1841         return dd->cce_err_status_cnt[31];
1842 }
1843
1844 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1845                                                void *context, int vl, int mode,
1846                                                u64 data)
1847 {
1848         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1849
1850         return dd->cce_err_status_cnt[30];
1851 }
1852
1853 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1854                                               void *context, int vl, int mode,
1855                                               u64 data)
1856 {
1857         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1858
1859         return dd->cce_err_status_cnt[29];
1860 }
1861
1862 static u64 access_pcic_transmit_back_parity_err_cnt(
1863                                 const struct cntr_entry *entry,
1864                                 void *context, int vl, int mode, u64 data)
1865 {
1866         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1867
1868         return dd->cce_err_status_cnt[28];
1869 }
1870
1871 static u64 access_pcic_transmit_front_parity_err_cnt(
1872                                 const struct cntr_entry *entry,
1873                                 void *context, int vl, int mode, u64 data)
1874 {
1875         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1876
1877         return dd->cce_err_status_cnt[27];
1878 }
1879
1880 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1881                                              void *context, int vl, int mode,
1882                                              u64 data)
1883 {
1884         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1885
1886         return dd->cce_err_status_cnt[26];
1887 }
1888
1889 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1890                                             void *context, int vl, int mode,
1891                                             u64 data)
1892 {
1893         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1894
1895         return dd->cce_err_status_cnt[25];
1896 }
1897
1898 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1899                                               void *context, int vl, int mode,
1900                                               u64 data)
1901 {
1902         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1903
1904         return dd->cce_err_status_cnt[24];
1905 }
1906
1907 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1908                                              void *context, int vl, int mode,
1909                                              u64 data)
1910 {
1911         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1912
1913         return dd->cce_err_status_cnt[23];
1914 }
1915
1916 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
1917                                                  void *context, int vl,
1918                                                  int mode, u64 data)
1919 {
1920         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1921
1922         return dd->cce_err_status_cnt[22];
1923 }
1924
1925 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
1926                                          void *context, int vl, int mode,
1927                                          u64 data)
1928 {
1929         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1930
1931         return dd->cce_err_status_cnt[21];
1932 }
1933
1934 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
1935                                 const struct cntr_entry *entry,
1936                                 void *context, int vl, int mode, u64 data)
1937 {
1938         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1939
1940         return dd->cce_err_status_cnt[20];
1941 }
1942
1943 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
1944                                                  void *context, int vl,
1945                                                  int mode, u64 data)
1946 {
1947         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1948
1949         return dd->cce_err_status_cnt[19];
1950 }
1951
1952 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1953                                              void *context, int vl, int mode,
1954                                              u64 data)
1955 {
1956         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1957
1958         return dd->cce_err_status_cnt[18];
1959 }
1960
1961 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1962                                             void *context, int vl, int mode,
1963                                             u64 data)
1964 {
1965         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1966
1967         return dd->cce_err_status_cnt[17];
1968 }
1969
1970 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
1971                                               void *context, int vl, int mode,
1972                                               u64 data)
1973 {
1974         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1975
1976         return dd->cce_err_status_cnt[16];
1977 }
1978
1979 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
1980                                              void *context, int vl, int mode,
1981                                              u64 data)
1982 {
1983         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1984
1985         return dd->cce_err_status_cnt[15];
1986 }
1987
1988 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
1989                                                  void *context, int vl,
1990                                                  int mode, u64 data)
1991 {
1992         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1993
1994         return dd->cce_err_status_cnt[14];
1995 }
1996
1997 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
1998                                              void *context, int vl, int mode,
1999                                              u64 data)
2000 {
2001         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2002
2003         return dd->cce_err_status_cnt[13];
2004 }
2005
2006 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2007                                 const struct cntr_entry *entry,
2008                                 void *context, int vl, int mode, u64 data)
2009 {
2010         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2011
2012         return dd->cce_err_status_cnt[12];
2013 }
2014
2015 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2016                                 const struct cntr_entry *entry,
2017                                 void *context, int vl, int mode, u64 data)
2018 {
2019         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2020
2021         return dd->cce_err_status_cnt[11];
2022 }
2023
2024 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2025                                 const struct cntr_entry *entry,
2026                                 void *context, int vl, int mode, u64 data)
2027 {
2028         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2029
2030         return dd->cce_err_status_cnt[10];
2031 }
2032
2033 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2034                                 const struct cntr_entry *entry,
2035                                 void *context, int vl, int mode, u64 data)
2036 {
2037         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2038
2039         return dd->cce_err_status_cnt[9];
2040 }
2041
2042 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2043                                 const struct cntr_entry *entry,
2044                                 void *context, int vl, int mode, u64 data)
2045 {
2046         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2047
2048         return dd->cce_err_status_cnt[8];
2049 }
2050
2051 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2052                                                  void *context, int vl,
2053                                                  int mode, u64 data)
2054 {
2055         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2056
2057         return dd->cce_err_status_cnt[7];
2058 }
2059
2060 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2061                                 const struct cntr_entry *entry,
2062                                 void *context, int vl, int mode, u64 data)
2063 {
2064         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2065
2066         return dd->cce_err_status_cnt[6];
2067 }
2068
2069 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2070                                                void *context, int vl, int mode,
2071                                                u64 data)
2072 {
2073         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2074
2075         return dd->cce_err_status_cnt[5];
2076 }
2077
2078 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2079                                           void *context, int vl, int mode,
2080                                           u64 data)
2081 {
2082         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2083
2084         return dd->cce_err_status_cnt[4];
2085 }
2086
2087 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2088                                 const struct cntr_entry *entry,
2089                                 void *context, int vl, int mode, u64 data)
2090 {
2091         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2092
2093         return dd->cce_err_status_cnt[3];
2094 }
2095
2096 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2097                                                  void *context, int vl,
2098                                                  int mode, u64 data)
2099 {
2100         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2101
2102         return dd->cce_err_status_cnt[2];
2103 }
2104
2105 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2106                                                 void *context, int vl,
2107                                                 int mode, u64 data)
2108 {
2109         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2110
2111         return dd->cce_err_status_cnt[1];
2112 }
2113
2114 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2115                                          void *context, int vl, int mode,
2116                                          u64 data)
2117 {
2118         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2119
2120         return dd->cce_err_status_cnt[0];
2121 }
2122
2123 /*
2124  * Software counters corresponding to each of the
2125  * error status bits within RcvErrStatus
2126  */
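/*
 * Likewise, the index into dd->rcv_err_status_cnt[] matches the
 * error's bit position within RcvErrStatus.
 */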
2127 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2128                                         void *context, int vl, int mode,
2129                                         u64 data)
2130 {
2131         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2132
2133         return dd->rcv_err_status_cnt[63];
2134 }
2135
2136 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2137                                                 void *context, int vl,
2138                                                 int mode, u64 data)
2139 {
2140         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2141
2142         return dd->rcv_err_status_cnt[62];
2143 }
2144
2145 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2146                                                void *context, int vl, int mode,
2147                                                u64 data)
2148 {
2149         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2150
2151         return dd->rcv_err_status_cnt[61];
2152 }
2153
2154 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2155                                          void *context, int vl, int mode,
2156                                          u64 data)
2157 {
2158         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2159
2160         return dd->rcv_err_status_cnt[60];
2161 }
2162
2163 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2164                                                  void *context, int vl,
2165                                                  int mode, u64 data)
2166 {
2167         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2168
2169         return dd->rcv_err_status_cnt[59];
2170 }
2171
2172 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2173                                                  void *context, int vl,
2174                                                  int mode, u64 data)
2175 {
2176         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2177
2178         return dd->rcv_err_status_cnt[58];
2179 }
2180
2181 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2182                                             void *context, int vl, int mode,
2183                                             u64 data)
2184 {
2185         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2186
2187         return dd->rcv_err_status_cnt[57];
2188 }
2189
2190 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2191                                            void *context, int vl, int mode,
2192                                            u64 data)
2193 {
2194         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2195
2196         return dd->rcv_err_status_cnt[56];
2197 }
2198
2199 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2200                                            void *context, int vl, int mode,
2201                                            u64 data)
2202 {
2203         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2204
2205         return dd->rcv_err_status_cnt[55];
2206 }
2207
2208 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2209                                 const struct cntr_entry *entry,
2210                                 void *context, int vl, int mode, u64 data)
2211 {
2212         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2213
2214         return dd->rcv_err_status_cnt[54];
2215 }
2216
2217 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2218                                 const struct cntr_entry *entry,
2219                                 void *context, int vl, int mode, u64 data)
2220 {
2221         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2222
2223         return dd->rcv_err_status_cnt[53];
2224 }
2225
2226 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2227                                                  void *context, int vl,
2228                                                  int mode, u64 data)
2229 {
2230         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2231
2232         return dd->rcv_err_status_cnt[52];
2233 }
2234
2235 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2236                                                  void *context, int vl,
2237                                                  int mode, u64 data)
2238 {
2239         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2240
2241         return dd->rcv_err_status_cnt[51];
2242 }
2243
2244 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2245                                                  void *context, int vl,
2246                                                  int mode, u64 data)
2247 {
2248         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2249
2250         return dd->rcv_err_status_cnt[50];
2251 }
2252
2253 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2254                                                  void *context, int vl,
2255                                                  int mode, u64 data)
2256 {
2257         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2258
2259         return dd->rcv_err_status_cnt[49];
2260 }
2261
2262 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2263                                                  void *context, int vl,
2264                                                  int mode, u64 data)
2265 {
2266         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2267
2268         return dd->rcv_err_status_cnt[48];
2269 }
2270
2271 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2272                                                  void *context, int vl,
2273                                                  int mode, u64 data)
2274 {
2275         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2276
2277         return dd->rcv_err_status_cnt[47];
2278 }
2279
2280 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2281                                          void *context, int vl, int mode,
2282                                          u64 data)
2283 {
2284         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2285
2286         return dd->rcv_err_status_cnt[46];
2287 }
2288
2289 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2290                                 const struct cntr_entry *entry,
2291                                 void *context, int vl, int mode, u64 data)
2292 {
2293         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2294
2295         return dd->rcv_err_status_cnt[45];
2296 }
2297
2298 static u64 access_rx_lookup_csr_parity_err_cnt(
2299                                 const struct cntr_entry *entry,
2300                                 void *context, int vl, int mode, u64 data)
2301 {
2302         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2303
2304         return dd->rcv_err_status_cnt[44];
2305 }
2306
2307 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2308                                 const struct cntr_entry *entry,
2309                                 void *context, int vl, int mode, u64 data)
2310 {
2311         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2312
2313         return dd->rcv_err_status_cnt[43];
2314 }
2315
2316 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2317                                 const struct cntr_entry *entry,
2318                                 void *context, int vl, int mode, u64 data)
2319 {
2320         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2321
2322         return dd->rcv_err_status_cnt[42];
2323 }
2324
2325 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2326                                 const struct cntr_entry *entry,
2327                                 void *context, int vl, int mode, u64 data)
2328 {
2329         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2330
2331         return dd->rcv_err_status_cnt[41];
2332 }
2333
2334 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2335                                 const struct cntr_entry *entry,
2336                                 void *context, int vl, int mode, u64 data)
2337 {
2338         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2339
2340         return dd->rcv_err_status_cnt[40];
2341 }
2342
2343 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2344                                 const struct cntr_entry *entry,
2345                                 void *context, int vl, int mode, u64 data)
2346 {
2347         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2348
2349         return dd->rcv_err_status_cnt[39];
2350 }
2351
2352 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2353                                 const struct cntr_entry *entry,
2354                                 void *context, int vl, int mode, u64 data)
2355 {
2356         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2357
2358         return dd->rcv_err_status_cnt[38];
2359 }
2360
2361 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2362                                 const struct cntr_entry *entry,
2363                                 void *context, int vl, int mode, u64 data)
2364 {
2365         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2366
2367         return dd->rcv_err_status_cnt[37];
2368 }
2369
2370 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2371                                 const struct cntr_entry *entry,
2372                                 void *context, int vl, int mode, u64 data)
2373 {
2374         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2375
2376         return dd->rcv_err_status_cnt[36];
2377 }
2378
2379 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2380                                 const struct cntr_entry *entry,
2381                                 void *context, int vl, int mode, u64 data)
2382 {
2383         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2384
2385         return dd->rcv_err_status_cnt[35];
2386 }
2387
2388 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2389                                 const struct cntr_entry *entry,
2390                                 void *context, int vl, int mode, u64 data)
2391 {
2392         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2393
2394         return dd->rcv_err_status_cnt[34];
2395 }
2396
2397 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2398                                 const struct cntr_entry *entry,
2399                                 void *context, int vl, int mode, u64 data)
2400 {
2401         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2402
2403         return dd->rcv_err_status_cnt[33];
2404 }
2405
2406 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2407                                         void *context, int vl, int mode,
2408                                         u64 data)
2409 {
2410         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2411
2412         return dd->rcv_err_status_cnt[32];
2413 }
2414
2415 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2416                                        void *context, int vl, int mode,
2417                                        u64 data)
2418 {
2419         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2420
2421         return dd->rcv_err_status_cnt[31];
2422 }
2423
2424 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2425                                           void *context, int vl, int mode,
2426                                           u64 data)
2427 {
2428         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2429
2430         return dd->rcv_err_status_cnt[30];
2431 }
2432
2433 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2434                                              void *context, int vl, int mode,
2435                                              u64 data)
2436 {
2437         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2438
2439         return dd->rcv_err_status_cnt[29];
2440 }
2441
2442 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2443                                                  void *context, int vl,
2444                                                  int mode, u64 data)
2445 {
2446         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2447
2448         return dd->rcv_err_status_cnt[28];
2449 }
2450
2451 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2452                                 const struct cntr_entry *entry,
2453                                 void *context, int vl, int mode, u64 data)
2454 {
2455         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2456
2457         return dd->rcv_err_status_cnt[27];
2458 }
2459
2460 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2461                                 const struct cntr_entry *entry,
2462                                 void *context, int vl, int mode, u64 data)
2463 {
2464         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2465
2466         return dd->rcv_err_status_cnt[26];
2467 }
2468
2469 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2470                                 const struct cntr_entry *entry,
2471                                 void *context, int vl, int mode, u64 data)
2472 {
2473         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2474
2475         return dd->rcv_err_status_cnt[25];
2476 }
2477
2478 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2479                                 const struct cntr_entry *entry,
2480                                 void *context, int vl, int mode, u64 data)
2481 {
2482         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2483
2484         return dd->rcv_err_status_cnt[24];
2485 }
2486
2487 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2488                                 const struct cntr_entry *entry,
2489                                 void *context, int vl, int mode, u64 data)
2490 {
2491         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2492
2493         return dd->rcv_err_status_cnt[23];
2494 }
2495
2496 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2497                                 const struct cntr_entry *entry,
2498                                 void *context, int vl, int mode, u64 data)
2499 {
2500         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2501
2502         return dd->rcv_err_status_cnt[22];
2503 }
2504
2505 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2506                                 const struct cntr_entry *entry,
2507                                 void *context, int vl, int mode, u64 data)
2508 {
2509         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2510
2511         return dd->rcv_err_status_cnt[21];
2512 }
2513
2514 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2515                                 const struct cntr_entry *entry,
2516                                 void *context, int vl, int mode, u64 data)
2517 {
2518         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2519
2520         return dd->rcv_err_status_cnt[20];
2521 }
2522
2523 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2524                                 const struct cntr_entry *entry,
2525                                 void *context, int vl, int mode, u64 data)
2526 {
2527         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2528
2529         return dd->rcv_err_status_cnt[19];
2530 }
2531
2532 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2533                                                  void *context, int vl,
2534                                                  int mode, u64 data)
2535 {
2536         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2537
2538         return dd->rcv_err_status_cnt[18];
2539 }
2540
2541 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2542                                                  void *context, int vl,
2543                                                  int mode, u64 data)
2544 {
2545         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2546
2547         return dd->rcv_err_status_cnt[17];
2548 }
2549
2550 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2551                                 const struct cntr_entry *entry,
2552                                 void *context, int vl, int mode, u64 data)
2553 {
2554         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2555
2556         return dd->rcv_err_status_cnt[16];
2557 }
2558
2559 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2560                                 const struct cntr_entry *entry,
2561                                 void *context, int vl, int mode, u64 data)
2562 {
2563         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2564
2565         return dd->rcv_err_status_cnt[15];
2566 }
2567
2568 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2569                                                 void *context, int vl,
2570                                                 int mode, u64 data)
2571 {
2572         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2573
2574         return dd->rcv_err_status_cnt[14];
2575 }
2576
2577 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2578                                                 void *context, int vl,
2579                                                 int mode, u64 data)
2580 {
2581         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2582
2583         return dd->rcv_err_status_cnt[13];
2584 }
2585
2586 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2587                                               void *context, int vl, int mode,
2588                                               u64 data)
2589 {
2590         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2591
2592         return dd->rcv_err_status_cnt[12];
2593 }
2594
2595 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2596                                           void *context, int vl, int mode,
2597                                           u64 data)
2598 {
2599         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2600
2601         return dd->rcv_err_status_cnt[11];
2602 }
2603
2604 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2605                                           void *context, int vl, int mode,
2606                                           u64 data)
2607 {
2608         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2609
2610         return dd->rcv_err_status_cnt[10];
2611 }
2612
2613 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2614                                                void *context, int vl, int mode,
2615                                                u64 data)
2616 {
2617         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2618
2619         return dd->rcv_err_status_cnt[9];
2620 }
2621
2622 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2623                                             void *context, int vl, int mode,
2624                                             u64 data)
2625 {
2626         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2627
2628         return dd->rcv_err_status_cnt[8];
2629 }
2630
2631 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2632                                 const struct cntr_entry *entry,
2633                                 void *context, int vl, int mode, u64 data)
2634 {
2635         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2636
2637         return dd->rcv_err_status_cnt[7];
2638 }
2639
2640 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2641                                 const struct cntr_entry *entry,
2642                                 void *context, int vl, int mode, u64 data)
2643 {
2644         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2645
2646         return dd->rcv_err_status_cnt[6];
2647 }
2648
2649 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2650                                           void *context, int vl, int mode,
2651                                           u64 data)
2652 {
2653         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2654
2655         return dd->rcv_err_status_cnt[5];
2656 }
2657
2658 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2659                                           void *context, int vl, int mode,
2660                                           u64 data)
2661 {
2662         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2663
2664         return dd->rcv_err_status_cnt[4];
2665 }
2666
2667 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2668                                          void *context, int vl, int mode,
2669                                          u64 data)
2670 {
2671         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2672
2673         return dd->rcv_err_status_cnt[3];
2674 }
2675
2676 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2677                                          void *context, int vl, int mode,
2678                                          u64 data)
2679 {
2680         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2681
2682         return dd->rcv_err_status_cnt[2];
2683 }
2684
2685 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2686                                             void *context, int vl, int mode,
2687                                             u64 data)
2688 {
2689         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2690
2691         return dd->rcv_err_status_cnt[1];
2692 }
2693
2694 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2695                                          void *context, int vl, int mode,
2696                                          u64 data)
2697 {
2698         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2699
2700         return dd->rcv_err_status_cnt[0];
2701 }
2702
2703 /*
2704  * Software counters corresponding to each of the
2705  * error status bits within SendPioErrStatus
2706  */
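/* Indices count down from bit 35 to bit 0 of the register. */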
2707 static u64 access_pio_pec_sop_head_parity_err_cnt(
2708                                 const struct cntr_entry *entry,
2709                                 void *context, int vl, int mode, u64 data)
2710 {
2711         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2712
2713         return dd->send_pio_err_status_cnt[35];
2714 }
2715
2716 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2717                                 const struct cntr_entry *entry,
2718                                 void *context, int vl, int mode, u64 data)
2719 {
2720         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2721
2722         return dd->send_pio_err_status_cnt[34];
2723 }
2724
2725 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2726                                 const struct cntr_entry *entry,
2727                                 void *context, int vl, int mode, u64 data)
2728 {
2729         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2730
2731         return dd->send_pio_err_status_cnt[33];
2732 }
2733
2734 static u64 access_pio_current_free_cnt_parity_err_cnt(
2735                                 const struct cntr_entry *entry,
2736                                 void *context, int vl, int mode, u64 data)
2737 {
2738         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2739
2740         return dd->send_pio_err_status_cnt[32];
2741 }
2742
2743 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2744                                           void *context, int vl, int mode,
2745                                           u64 data)
2746 {
2747         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2748
2749         return dd->send_pio_err_status_cnt[31];
2750 }
2751
2752 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2753                                           void *context, int vl, int mode,
2754                                           u64 data)
2755 {
2756         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2757
2758         return dd->send_pio_err_status_cnt[30];
2759 }
2760
2761 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2762                                            void *context, int vl, int mode,
2763                                            u64 data)
2764 {
2765         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2766
2767         return dd->send_pio_err_status_cnt[29];
2768 }
2769
2770 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2771                                 const struct cntr_entry *entry,
2772                                 void *context, int vl, int mode, u64 data)
2773 {
2774         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2775
2776         return dd->send_pio_err_status_cnt[28];
2777 }
2778
2779 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2780                                              void *context, int vl, int mode,
2781                                              u64 data)
2782 {
2783         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2784
2785         return dd->send_pio_err_status_cnt[27];
2786 }
2787
2788 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2789                                              void *context, int vl, int mode,
2790                                              u64 data)
2791 {
2792         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2793
2794         return dd->send_pio_err_status_cnt[26];
2795 }
2796
2797 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2798                                                 void *context, int vl,
2799                                                 int mode, u64 data)
2800 {
2801         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2802
2803         return dd->send_pio_err_status_cnt[25];
2804 }
2805
2806 static u64 access_pio_block_qw_count_parity_err_cnt(
2807                                 const struct cntr_entry *entry,
2808                                 void *context, int vl, int mode, u64 data)
2809 {
2810         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2811
2812         return dd->send_pio_err_status_cnt[24];
2813 }
2814
2815 static u64 access_pio_write_qw_valid_parity_err_cnt(
2816                                 const struct cntr_entry *entry,
2817                                 void *context, int vl, int mode, u64 data)
2818 {
2819         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2820
2821         return dd->send_pio_err_status_cnt[23];
2822 }
2823
2824 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2825                                             void *context, int vl, int mode,
2826                                             u64 data)
2827 {
2828         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2829
2830         return dd->send_pio_err_status_cnt[22];
2831 }
2832
2833 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2834                                                 void *context, int vl,
2835                                                 int mode, u64 data)
2836 {
2837         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2838
2839         return dd->send_pio_err_status_cnt[21];
2840 }
2841
2842 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2843                                                 void *context, int vl,
2844                                                 int mode, u64 data)
2845 {
2846         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2847
2848         return dd->send_pio_err_status_cnt[20];
2849 }
2850
2851 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2852                                                 void *context, int vl,
2853                                                 int mode, u64 data)
2854 {
2855         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2856
2857         return dd->send_pio_err_status_cnt[19];
2858 }
2859
2860 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2861                                 const struct cntr_entry *entry,
2862                                 void *context, int vl, int mode, u64 data)
2863 {
2864         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2865
2866         return dd->send_pio_err_status_cnt[18];
2867 }
2868
2869 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2870                                          void *context, int vl, int mode,
2871                                          u64 data)
2872 {
2873         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2874
2875         return dd->send_pio_err_status_cnt[17];
2876 }
2877
2878 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2879                                             void *context, int vl, int mode,
2880                                             u64 data)
2881 {
2882         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2883
2884         return dd->send_pio_err_status_cnt[16];
2885 }
2886
2887 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2888                                 const struct cntr_entry *entry,
2889                                 void *context, int vl, int mode, u64 data)
2890 {
2891         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2892
2893         return dd->send_pio_err_status_cnt[15];
2894 }
2895
2896 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2897                                 const struct cntr_entry *entry,
2898                                 void *context, int vl, int mode, u64 data)
2899 {
2900         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2901
2902         return dd->send_pio_err_status_cnt[14];
2903 }
2904
2905 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2906                                 const struct cntr_entry *entry,
2907                                 void *context, int vl, int mode, u64 data)
2908 {
2909         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2910
2911         return dd->send_pio_err_status_cnt[13];
2912 }
2913
2914 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
2915                                 const struct cntr_entry *entry,
2916                                 void *context, int vl, int mode, u64 data)
2917 {
2918         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2919
2920         return dd->send_pio_err_status_cnt[12];
2921 }
2922
2923 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
2924                                 const struct cntr_entry *entry,
2925                                 void *context, int vl, int mode, u64 data)
2926 {
2927         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2928
2929         return dd->send_pio_err_status_cnt[11];
2930 }
2931
2932 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
2933                                 const struct cntr_entry *entry,
2934                                 void *context, int vl, int mode, u64 data)
2935 {
2936         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2937
2938         return dd->send_pio_err_status_cnt[10];
2939 }
2940
2941 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
2942                                 const struct cntr_entry *entry,
2943                                 void *context, int vl, int mode, u64 data)
2944 {
2945         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2946
2947         return dd->send_pio_err_status_cnt[9];
2948 }
2949
2950 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
2951                                 const struct cntr_entry *entry,
2952                                 void *context, int vl, int mode, u64 data)
2953 {
2954         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2955
2956         return dd->send_pio_err_status_cnt[8];
2957 }
2958
2959 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
2960                                 const struct cntr_entry *entry,
2961                                 void *context, int vl, int mode, u64 data)
2962 {
2963         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2964
2965         return dd->send_pio_err_status_cnt[7];
2966 }
2967
2968 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
2969                                               void *context, int vl, int mode,
2970                                               u64 data)
2971 {
2972         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2973
2974         return dd->send_pio_err_status_cnt[6];
2975 }
2976
2977 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
2978                                               void *context, int vl, int mode,
2979                                               u64 data)
2980 {
2981         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2982
2983         return dd->send_pio_err_status_cnt[5];
2984 }
2985
2986 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
2987                                            void *context, int vl, int mode,
2988                                            u64 data)
2989 {
2990         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2991
2992         return dd->send_pio_err_status_cnt[4];
2993 }
2994
2995 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
2996                                            void *context, int vl, int mode,
2997                                            u64 data)
2998 {
2999         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3000
3001         return dd->send_pio_err_status_cnt[3];
3002 }
3003
3004 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3005                                          void *context, int vl, int mode,
3006                                          u64 data)
3007 {
3008         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3009
3010         return dd->send_pio_err_status_cnt[2];
3011 }
3012
3013 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3014                                                 void *context, int vl,
3015                                                 int mode, u64 data)
3016 {
3017         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3018
3019         return dd->send_pio_err_status_cnt[1];
3020 }
3021
3022 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3023                                              void *context, int vl, int mode,
3024                                              u64 data)
3025 {
3026         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3027
3028         return dd->send_pio_err_status_cnt[0];
3029 }
3030
3031 /*
3032  * Software counters corresponding to each of the
3033  * error status bits within SendDmaErrStatus
3034  */
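/* Only the four low bits (3..0) of SendDmaErrStatus are counted. */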
3035 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3036                                 const struct cntr_entry *entry,
3037                                 void *context, int vl, int mode, u64 data)
3038 {
3039         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3040
3041         return dd->send_dma_err_status_cnt[3];
3042 }
3043
3044 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3045                                 const struct cntr_entry *entry,
3046                                 void *context, int vl, int mode, u64 data)
3047 {
3048         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3049
3050         return dd->send_dma_err_status_cnt[2];
3051 }
3052
3053 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3054                                           void *context, int vl, int mode,
3055                                           u64 data)
3056 {
3057         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3058
3059         return dd->send_dma_err_status_cnt[1];
3060 }
3061
3062 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3063                                        void *context, int vl, int mode,
3064                                        u64 data)
3065 {
3066         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3067
3068         return dd->send_dma_err_status_cnt[0];
3069 }
3070
3071 /*
3072  * Software counters corresponding to each of the
3073  * error status bits within SendEgressErrStatus
3074  */
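/*
 * SendEgressErrStatus is a full 64-bit register; accessors are provided
 * for every index from 63 down to 0, including the reserved bits (10,
 * 9, 6 and 2), which are counted like any other bit.
 */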
3075 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3076                                 const struct cntr_entry *entry,
3077                                 void *context, int vl, int mode, u64 data)
3078 {
3079         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3080
3081         return dd->send_egress_err_status_cnt[63];
3082 }
3083
3084 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3085                                 const struct cntr_entry *entry,
3086                                 void *context, int vl, int mode, u64 data)
3087 {
3088         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3089
3090         return dd->send_egress_err_status_cnt[62];
3091 }
3092
3093 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3094                                              void *context, int vl, int mode,
3095                                              u64 data)
3096 {
3097         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3098
3099         return dd->send_egress_err_status_cnt[61];
3100 }
3101
3102 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3103                                                  void *context, int vl,
3104                                                  int mode, u64 data)
3105 {
3106         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3107
3108         return dd->send_egress_err_status_cnt[60];
3109 }
3110
3111 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3112                                 const struct cntr_entry *entry,
3113                                 void *context, int vl, int mode, u64 data)
3114 {
3115         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3116
3117         return dd->send_egress_err_status_cnt[59];
3118 }
3119
3120 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3121                                         void *context, int vl, int mode,
3122                                         u64 data)
3123 {
3124         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3125
3126         return dd->send_egress_err_status_cnt[58];
3127 }
3128
3129 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3130                                             void *context, int vl, int mode,
3131                                             u64 data)
3132 {
3133         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3134
3135         return dd->send_egress_err_status_cnt[57];
3136 }
3137
3138 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3139                                               void *context, int vl, int mode,
3140                                               u64 data)
3141 {
3142         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3143
3144         return dd->send_egress_err_status_cnt[56];
3145 }
3146
3147 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3148                                               void *context, int vl, int mode,
3149                                               u64 data)
3150 {
3151         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3152
3153         return dd->send_egress_err_status_cnt[55];
3154 }
3155
3156 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3157                                               void *context, int vl, int mode,
3158                                               u64 data)
3159 {
3160         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3161
3162         return dd->send_egress_err_status_cnt[54];
3163 }
3164
3165 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3166                                               void *context, int vl, int mode,
3167                                               u64 data)
3168 {
3169         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3170
3171         return dd->send_egress_err_status_cnt[53];
3172 }
3173
3174 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3175                                               void *context, int vl, int mode,
3176                                               u64 data)
3177 {
3178         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3179
3180         return dd->send_egress_err_status_cnt[52];
3181 }
3182
3183 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3184                                               void *context, int vl, int mode,
3185                                               u64 data)
3186 {
3187         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3188
3189         return dd->send_egress_err_status_cnt[51];
3190 }
3191
3192 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3193                                               void *context, int vl, int mode,
3194                                               u64 data)
3195 {
3196         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3197
3198         return dd->send_egress_err_status_cnt[50];
3199 }
3200
3201 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3202                                               void *context, int vl, int mode,
3203                                               u64 data)
3204 {
3205         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3206
3207         return dd->send_egress_err_status_cnt[49];
3208 }
3209
3210 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3211                                               void *context, int vl, int mode,
3212                                               u64 data)
3213 {
3214         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3215
3216         return dd->send_egress_err_status_cnt[48];
3217 }
3218
3219 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3220                                               void *context, int vl, int mode,
3221                                               u64 data)
3222 {
3223         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3224
3225         return dd->send_egress_err_status_cnt[47];
3226 }
3227
3228 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3229                                             void *context, int vl, int mode,
3230                                             u64 data)
3231 {
3232         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3233
3234         return dd->send_egress_err_status_cnt[46];
3235 }
3236
3237 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3238                                              void *context, int vl, int mode,
3239                                              u64 data)
3240 {
3241         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3242
3243         return dd->send_egress_err_status_cnt[45];
3244 }
3245
3246 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3247                                                  void *context, int vl,
3248                                                  int mode, u64 data)
3249 {
3250         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3251
3252         return dd->send_egress_err_status_cnt[44];
3253 }
3254
3255 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3256                                 const struct cntr_entry *entry,
3257                                 void *context, int vl, int mode, u64 data)
3258 {
3259         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3260
3261         return dd->send_egress_err_status_cnt[43];
3262 }
3263
3264 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3265                                         void *context, int vl, int mode,
3266                                         u64 data)
3267 {
3268         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3269
3270         return dd->send_egress_err_status_cnt[42];
3271 }
3272
3273 static u64 access_tx_credit_return_parity_err_cnt(
3274                                 const struct cntr_entry *entry,
3275                                 void *context, int vl, int mode, u64 data)
3276 {
3277         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3278
3279         return dd->send_egress_err_status_cnt[41];
3280 }
3281
3282 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3283                                 const struct cntr_entry *entry,
3284                                 void *context, int vl, int mode, u64 data)
3285 {
3286         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3287
3288         return dd->send_egress_err_status_cnt[40];
3289 }
3290
3291 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3292                                 const struct cntr_entry *entry,
3293                                 void *context, int vl, int mode, u64 data)
3294 {
3295         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3296
3297         return dd->send_egress_err_status_cnt[39];
3298 }
3299
3300 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3301                                 const struct cntr_entry *entry,
3302                                 void *context, int vl, int mode, u64 data)
3303 {
3304         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3305
3306         return dd->send_egress_err_status_cnt[38];
3307 }
3308
3309 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3310                                 const struct cntr_entry *entry,
3311                                 void *context, int vl, int mode, u64 data)
3312 {
3313         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3314
3315         return dd->send_egress_err_status_cnt[37];
3316 }
3317
3318 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3319                                 const struct cntr_entry *entry,
3320                                 void *context, int vl, int mode, u64 data)
3321 {
3322         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3323
3324         return dd->send_egress_err_status_cnt[36];
3325 }
3326
3327 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3328                                 const struct cntr_entry *entry,
3329                                 void *context, int vl, int mode, u64 data)
3330 {
3331         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3332
3333         return dd->send_egress_err_status_cnt[35];
3334 }
3335
3336 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3337                                 const struct cntr_entry *entry,
3338                                 void *context, int vl, int mode, u64 data)
3339 {
3340         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3341
3342         return dd->send_egress_err_status_cnt[34];
3343 }
3344
3345 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3346                                 const struct cntr_entry *entry,
3347                                 void *context, int vl, int mode, u64 data)
3348 {
3349         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3350
3351         return dd->send_egress_err_status_cnt[33];
3352 }
3353
3354 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3355                                 const struct cntr_entry *entry,
3356                                 void *context, int vl, int mode, u64 data)
3357 {
3358         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3359
3360         return dd->send_egress_err_status_cnt[32];
3361 }
3362
3363 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3364                                 const struct cntr_entry *entry,
3365                                 void *context, int vl, int mode, u64 data)
3366 {
3367         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3368
3369         return dd->send_egress_err_status_cnt[31];
3370 }
3371
3372 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3373                                 const struct cntr_entry *entry,
3374                                 void *context, int vl, int mode, u64 data)
3375 {
3376         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3377
3378         return dd->send_egress_err_status_cnt[30];
3379 }
3380
3381 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3382                                 const struct cntr_entry *entry,
3383                                 void *context, int vl, int mode, u64 data)
3384 {
3385         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3386
3387         return dd->send_egress_err_status_cnt[29];
3388 }
3389
3390 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3391                                 const struct cntr_entry *entry,
3392                                 void *context, int vl, int mode, u64 data)
3393 {
3394         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3395
3396         return dd->send_egress_err_status_cnt[28];
3397 }
3398
3399 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3400                                 const struct cntr_entry *entry,
3401                                 void *context, int vl, int mode, u64 data)
3402 {
3403         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3404
3405         return dd->send_egress_err_status_cnt[27];
3406 }
3407
3408 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3409                                 const struct cntr_entry *entry,
3410                                 void *context, int vl, int mode, u64 data)
3411 {
3412         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3413
3414         return dd->send_egress_err_status_cnt[26];
3415 }
3416
3417 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3418                                 const struct cntr_entry *entry,
3419                                 void *context, int vl, int mode, u64 data)
3420 {
3421         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3422
3423         return dd->send_egress_err_status_cnt[25];
3424 }
3425
3426 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3427                                 const struct cntr_entry *entry,
3428                                 void *context, int vl, int mode, u64 data)
3429 {
3430         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3431
3432         return dd->send_egress_err_status_cnt[24];
3433 }
3434
3435 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3436                                 const struct cntr_entry *entry,
3437                                 void *context, int vl, int mode, u64 data)
3438 {
3439         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3440
3441         return dd->send_egress_err_status_cnt[23];
3442 }
3443
3444 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3445                                 const struct cntr_entry *entry,
3446                                 void *context, int vl, int mode, u64 data)
3447 {
3448         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3449
3450         return dd->send_egress_err_status_cnt[22];
3451 }
3452
3453 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3454                                 const struct cntr_entry *entry,
3455                                 void *context, int vl, int mode, u64 data)
3456 {
3457         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3458
3459         return dd->send_egress_err_status_cnt[21];
3460 }
3461
3462 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3463                                 const struct cntr_entry *entry,
3464                                 void *context, int vl, int mode, u64 data)
3465 {
3466         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3467
3468         return dd->send_egress_err_status_cnt[20];
3469 }
3470
3471 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3472                                 const struct cntr_entry *entry,
3473                                 void *context, int vl, int mode, u64 data)
3474 {
3475         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3476
3477         return dd->send_egress_err_status_cnt[19];
3478 }
3479
3480 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3481                                 const struct cntr_entry *entry,
3482                                 void *context, int vl, int mode, u64 data)
3483 {
3484         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3485
3486         return dd->send_egress_err_status_cnt[18];
3487 }
3488
3489 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3490                                 const struct cntr_entry *entry,
3491                                 void *context, int vl, int mode, u64 data)
3492 {
3493         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3494
3495         return dd->send_egress_err_status_cnt[17];
3496 }
3497
3498 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3499                                 const struct cntr_entry *entry,
3500                                 void *context, int vl, int mode, u64 data)
3501 {
3502         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3503
3504         return dd->send_egress_err_status_cnt[16];
3505 }
3506
3507 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3508                                            void *context, int vl, int mode,
3509                                            u64 data)
3510 {
3511         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3512
3513         return dd->send_egress_err_status_cnt[15];
3514 }
3515
3516 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3517                                                  void *context, int vl,
3518                                                  int mode, u64 data)
3519 {
3520         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3521
3522         return dd->send_egress_err_status_cnt[14];
3523 }
3524
3525 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3526                                                void *context, int vl, int mode,
3527                                                u64 data)
3528 {
3529         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3530
3531         return dd->send_egress_err_status_cnt[13];
3532 }
3533
3534 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3535                                         void *context, int vl, int mode,
3536                                         u64 data)
3537 {
3538         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3539
3540         return dd->send_egress_err_status_cnt[12];
3541 }
3542
3543 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3544                                 const struct cntr_entry *entry,
3545                                 void *context, int vl, int mode, u64 data)
3546 {
3547         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3548
3549         return dd->send_egress_err_status_cnt[11];
3550 }
3551
3552 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3553                                              void *context, int vl, int mode,
3554                                              u64 data)
3555 {
3556         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3557
3558         return dd->send_egress_err_status_cnt[10];
3559 }
3560
3561 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3562                                             void *context, int vl, int mode,
3563                                             u64 data)
3564 {
3565         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3566
3567         return dd->send_egress_err_status_cnt[9];
3568 }
3569
3570 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3571                                 const struct cntr_entry *entry,
3572                                 void *context, int vl, int mode, u64 data)
3573 {
3574         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3575
3576         return dd->send_egress_err_status_cnt[8];
3577 }
3578
3579 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3580                                 const struct cntr_entry *entry,
3581                                 void *context, int vl, int mode, u64 data)
3582 {
3583         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3584
3585         return dd->send_egress_err_status_cnt[7];
3586 }
3587
3588 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3589                                             void *context, int vl, int mode,
3590                                             u64 data)
3591 {
3592         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3593
3594         return dd->send_egress_err_status_cnt[6];
3595 }
3596
3597 static u64 access_tx_incorrect_link_state_err_cnt(
3598                                 const struct cntr_entry *entry,
3599                                 void *context, int vl, int mode, u64 data)
3600 {
3601         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3602
3603         return dd->send_egress_err_status_cnt[5];
3604 }
3605
3606 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3607                                       void *context, int vl, int mode,
3608                                       u64 data)
3609 {
3610         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3611
3612         return dd->send_egress_err_status_cnt[4];
3613 }
3614
3615 static u64 access_tx_egress_fifo_underrun_or_parity_err_cnt(
3616                                 const struct cntr_entry *entry,
3617                                 void *context, int vl, int mode, u64 data)
3618 {
3619         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3620
3621         return dd->send_egress_err_status_cnt[3];
3622 }
3623
3624 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3625                                             void *context, int vl, int mode,
3626                                             u64 data)
3627 {
3628         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3629
3630         return dd->send_egress_err_status_cnt[2];
3631 }
3632
3633 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3634                                 const struct cntr_entry *entry,
3635                                 void *context, int vl, int mode, u64 data)
3636 {
3637         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3638
3639         return dd->send_egress_err_status_cnt[1];
3640 }
3641
3642 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3643                                 const struct cntr_entry *entry,
3644                                 void *context, int vl, int mode, u64 data)
3645 {
3646         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3647
3648         return dd->send_egress_err_status_cnt[0];
3649 }
3650
3651 /*
3652  * Software counters corresponding to each of the
3653  * error status bits within SendErrStatus
3654  */
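/* Only bits 2..0 of SendErrStatus are counted. */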
3655 static u64 access_send_csr_write_bad_addr_err_cnt(
3656                                 const struct cntr_entry *entry,
3657                                 void *context, int vl, int mode, u64 data)
3658 {
3659         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3660
3661         return dd->send_err_status_cnt[2];
3662 }
3663
3664 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3665                                                  void *context, int vl,
3666                                                  int mode, u64 data)
3667 {
3668         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3669
3670         return dd->send_err_status_cnt[1];
3671 }
3672
3673 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3674                                       void *context, int vl, int mode,
3675                                       u64 data)
3676 {
3677         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3678
3679         return dd->send_err_status_cnt[0];
3680 }
3681
3682 /*
3683  * Software counters corresponding to each of the
3684  * error status bits within SendCtxtErrStatus
3685  */
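/* Per-send-context errors: bits 4..0, shadowed in sw_ctxt_err_status_cnt. */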
3686 static u64 access_pio_write_out_of_bounds_err_cnt(
3687                                 const struct cntr_entry *entry,
3688                                 void *context, int vl, int mode, u64 data)
3689 {
3690         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3691
3692         return dd->sw_ctxt_err_status_cnt[4];
3693 }
3694
3695 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3696                                              void *context, int vl, int mode,
3697                                              u64 data)
3698 {
3699         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3700
3701         return dd->sw_ctxt_err_status_cnt[3];
3702 }
3703
3704 static u64 access_pio_write_crosses_boundary_err_cnt(
3705                                 const struct cntr_entry *entry,
3706                                 void *context, int vl, int mode, u64 data)
3707 {
3708         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3709
3710         return dd->sw_ctxt_err_status_cnt[2];
3711 }
3712
3713 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3714                                                 void *context, int vl,
3715                                                 int mode, u64 data)
3716 {
3717         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3718
3719         return dd->sw_ctxt_err_status_cnt[1];
3720 }
3721
3722 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3723                                                void *context, int vl, int mode,
3724                                                u64 data)
3725 {
3726         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3727
3728         return dd->sw_ctxt_err_status_cnt[0];
3729 }
3730
3731 /*
3732  * Software counters corresponding to each of the
3733  * error status bits within SendDmaEngErrStatus
3734  */
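/*
 * Per-SDMA-engine errors: bits 23..0, shadowed in
 * sw_send_dma_eng_err_status_cnt.
 */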
3735 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3736                                 const struct cntr_entry *entry,
3737                                 void *context, int vl, int mode, u64 data)
3738 {
3739         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740
3741         return dd->sw_send_dma_eng_err_status_cnt[23];
3742 }
3743
3744 static u64 access_sdma_header_storage_cor_err_cnt(
3745                                 const struct cntr_entry *entry,
3746                                 void *context, int vl, int mode, u64 data)
3747 {
3748         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3749
3750         return dd->sw_send_dma_eng_err_status_cnt[22];
3751 }
3752
3753 static u64 access_sdma_packet_tracking_cor_err_cnt(
3754                                 const struct cntr_entry *entry,
3755                                 void *context, int vl, int mode, u64 data)
3756 {
3757         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3758
3759         return dd->sw_send_dma_eng_err_status_cnt[21];
3760 }
3761
3762 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3763                                             void *context, int vl, int mode,
3764                                             u64 data)
3765 {
3766         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3767
3768         return dd->sw_send_dma_eng_err_status_cnt[20];
3769 }
3770
3771 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3772                                               void *context, int vl, int mode,
3773                                               u64 data)
3774 {
3775         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3776
3777         return dd->sw_send_dma_eng_err_status_cnt[19];
3778 }
3779
3780 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3781                                 const struct cntr_entry *entry,
3782                                 void *context, int vl, int mode, u64 data)
3783 {
3784         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3785
3786         return dd->sw_send_dma_eng_err_status_cnt[18];
3787 }
3788
3789 static u64 access_sdma_header_storage_unc_err_cnt(
3790                                 const struct cntr_entry *entry,
3791                                 void *context, int vl, int mode, u64 data)
3792 {
3793         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3794
3795         return dd->sw_send_dma_eng_err_status_cnt[17];
3796 }
3797
3798 static u64 access_sdma_packet_tracking_unc_err_cnt(
3799                                 const struct cntr_entry *entry,
3800                                 void *context, int vl, int mode, u64 data)
3801 {
3802         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3803
3804         return dd->sw_send_dma_eng_err_status_cnt[16];
3805 }
3806
3807 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3808                                             void *context, int vl, int mode,
3809                                             u64 data)
3810 {
3811         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3812
3813         return dd->sw_send_dma_eng_err_status_cnt[15];
3814 }
3815
3816 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3817                                               void *context, int vl, int mode,
3818                                               u64 data)
3819 {
3820         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3821
3822         return dd->sw_send_dma_eng_err_status_cnt[14];
3823 }
3824
3825 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3826                                        void *context, int vl, int mode,
3827                                        u64 data)
3828 {
3829         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3830
3831         return dd->sw_send_dma_eng_err_status_cnt[13];
3832 }
3833
3834 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3835                                              void *context, int vl, int mode,
3836                                              u64 data)
3837 {
3838         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3839
3840         return dd->sw_send_dma_eng_err_status_cnt[12];
3841 }
3842
3843 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3844                                               void *context, int vl, int mode,
3845                                               u64 data)
3846 {
3847         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3848
3849         return dd->sw_send_dma_eng_err_status_cnt[11];
3850 }
3851
3852 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3853                                              void *context, int vl, int mode,
3854                                              u64 data)
3855 {
3856         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3857
3858         return dd->sw_send_dma_eng_err_status_cnt[10];
3859 }
3860
3861 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3862                                           void *context, int vl, int mode,
3863                                           u64 data)
3864 {
3865         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3866
3867         return dd->sw_send_dma_eng_err_status_cnt[9];
3868 }
3869
3870 static u64 access_sdma_packet_desc_overflow_err_cnt(
3871                                 const struct cntr_entry *entry,
3872                                 void *context, int vl, int mode, u64 data)
3873 {
3874         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3875
3876         return dd->sw_send_dma_eng_err_status_cnt[8];
3877 }
3878
3879 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3880                                                void *context, int vl,
3881                                                int mode, u64 data)
3882 {
3883         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3884
3885         return dd->sw_send_dma_eng_err_status_cnt[7];
3886 }
3887
3888 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3889                                     void *context, int vl, int mode, u64 data)
3890 {
3891         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3892
3893         return dd->sw_send_dma_eng_err_status_cnt[6];
3894 }
3895
3896 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3897                                         void *context, int vl, int mode,
3898                                         u64 data)
3899 {
3900         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3901
3902         return dd->sw_send_dma_eng_err_status_cnt[5];
3903 }
3904
3905 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3906                                           void *context, int vl, int mode,
3907                                           u64 data)
3908 {
3909         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3910
3911         return dd->sw_send_dma_eng_err_status_cnt[4];
3912 }
3913
3914 static u64 access_sdma_tail_out_of_bounds_err_cnt(
3915                                 const struct cntr_entry *entry,
3916                                 void *context, int vl, int mode, u64 data)
3917 {
3918         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3919
3920         return dd->sw_send_dma_eng_err_status_cnt[3];
3921 }
3922
3923 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
3924                                         void *context, int vl, int mode,
3925                                         u64 data)
3926 {
3927         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3928
3929         return dd->sw_send_dma_eng_err_status_cnt[2];
3930 }
3931
3932 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
3933                                             void *context, int vl, int mode,
3934                                             u64 data)
3935 {
3936         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3937
3938         return dd->sw_send_dma_eng_err_status_cnt[1];
3939 }
3940
3941 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
3942                                         void *context, int vl, int mode,
3943                                         u64 data)
3944 {
3945         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3946
3947         return dd->sw_send_dma_eng_err_status_cnt[0];
3948 }
3949
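/*
 * Generate an accessor for a per-CPU software counter kept in the
 * per-port ibport_data; read_write_cpu() performs the read or the
 * write/reset indicated by mode.
 */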
3950 #define def_access_sw_cpu(cntr) \
3951 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
3952                               void *context, int vl, int mode, u64 data)      \
3953 {                                                                             \
3954         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
3955         return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
3956                               ppd->ibport_data.rvp.cntr, vl,                  \
3957                               mode, data);                                    \
3958 }
3959
3960 def_access_sw_cpu(rc_acks);
3961 def_access_sw_cpu(rc_qacks);
3962 def_access_sw_cpu(rc_delayed_comp);
3963
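/*
 * Generate an accessor for a plain software counter in ibport_data.
 * These counters are not kept per-VL, so any per-VL query returns 0.
 */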
3964 #define def_access_ibp_counter(cntr) \
3965 static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
3966                                 void *context, int vl, int mode, u64 data)    \
3967 {                                                                             \
3968         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
3969                                                                               \
3970         if (vl != CNTR_INVALID_VL)                                            \
3971                 return 0;                                                     \
3972                                                                               \
3973         return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
3974                              mode, data);                                     \
3975 }
3976
3977 def_access_ibp_counter(loop_pkts);
3978 def_access_ibp_counter(rc_resends);
3979 def_access_ibp_counter(rnr_naks);
3980 def_access_ibp_counter(other_naks);
3981 def_access_ibp_counter(rc_timeouts);
3982 def_access_ibp_counter(pkt_drops);
3983 def_access_ibp_counter(dmawait);
3984 def_access_ibp_counter(rc_seqnak);
3985 def_access_ibp_counter(rc_dupreq);
3986 def_access_ibp_counter(rdma_seq);
3987 def_access_ibp_counter(unaligned);
3988 def_access_ibp_counter(seq_naks);
3989
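/*
 * Device counter table.  C99 designated initializers key each entry by
 * its C_* enum value, so the table stays correct even if the enum is
 * reordered.  Each element pairs a display name with a CSR offset (0
 * for purely software counters), an accessor, and flags.  Broadly:
 * CNTR_NORMAL entries are read directly, CNTR_SYNTH entries are
 * maintained as synthesized 64-bit values built from narrower hardware
 * counters, CNTR_32BIT marks 32-bit hardware counters, CNTR_VL
 * replicates the entry per virtual lane, and CNTR_SDMA replicates it
 * per SDMA engine (those accessors appear to reuse the vl argument as
 * the engine index).
 */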
3990 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
3991 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
3992 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullErr, RCV_TID_FULL_ERR_CNT,
3993                         CNTR_NORMAL),
3994 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
3995                         CNTR_NORMAL),
3996 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
3997                         RCV_TID_FLOW_GEN_MISMATCH_CNT,
3998                         CNTR_NORMAL),
3999 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4000                         CNTR_NORMAL),
4001 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4002                         RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4003 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4004                         CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4005 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4006                         CNTR_NORMAL),
4007 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4008                         CNTR_NORMAL),
4009 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4010                         CNTR_NORMAL),
4011 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4012                         CNTR_NORMAL),
4013 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4014                         CNTR_NORMAL),
4015 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4016                         CNTR_NORMAL),
4017 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4018                         CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4019 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4020                         CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4021 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4022                               CNTR_SYNTH),
4023 [C_DC_RCV_ERR] = DC_PERF_CNTR(DcRecvErr, DCC_ERR_PORTRCV_ERR_CNT, CNTR_SYNTH),
4024 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4025                                  CNTR_SYNTH),
4026 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4027                                   CNTR_SYNTH),
4028 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4029                                   CNTR_SYNTH),
4030 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4031                                    DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4032 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4033                                   DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4034                                   CNTR_SYNTH),
4035 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4036                                 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4037 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4038                                CNTR_SYNTH),
4039 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4040                               CNTR_SYNTH),
4041 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4042                                CNTR_SYNTH),
4043 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4044                                  CNTR_SYNTH),
4045 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4046                                 CNTR_SYNTH),
4047 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4048                                 CNTR_SYNTH),
4049 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4050                                CNTR_SYNTH),
4051 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4052                                  CNTR_SYNTH | CNTR_VL),
4053 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4054                                 CNTR_SYNTH | CNTR_VL),
4055 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4056 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4057                                  CNTR_SYNTH | CNTR_VL),
4058 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4059 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4060                                  CNTR_SYNTH | CNTR_VL),
4061 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4062                               CNTR_SYNTH),
4063 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4064                                  CNTR_SYNTH | CNTR_VL),
4065 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4066                                 CNTR_SYNTH),
4067 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4068                                    CNTR_SYNTH | CNTR_VL),
4069 [C_DC_TOTAL_CRC] =
4070         DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4071                          CNTR_SYNTH),
4072 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4073                                   CNTR_SYNTH),
4074 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4075                                   CNTR_SYNTH),
4076 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4077                                   CNTR_SYNTH),
4078 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4079                                   CNTR_SYNTH),
4080 [C_DC_CRC_MULT_LN] =
4081         DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4082                          CNTR_SYNTH),
4083 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4084                                     CNTR_SYNTH),
4085 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4086                                     CNTR_SYNTH),
4087 [C_DC_SEQ_CRC_CNT] =
4088         DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4089                          CNTR_SYNTH),
4090 [C_DC_ESC0_ONLY_CNT] =
4091         DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4092                          CNTR_SYNTH),
4093 [C_DC_ESC0_PLUS1_CNT] =
4094         DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4095                          CNTR_SYNTH),
4096 [C_DC_ESC0_PLUS2_CNT] =
4097         DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4098                          CNTR_SYNTH),
4099 [C_DC_REINIT_FROM_PEER_CNT] =
4100         DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4101                          CNTR_SYNTH),
4102 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4103                                   CNTR_SYNTH),
4104 [C_DC_MISC_FLG_CNT] =
4105         DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4106                          CNTR_SYNTH),
4107 [C_DC_PRF_GOOD_LTP_CNT] =
4108         DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4109 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4110         DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4111                          CNTR_SYNTH),
4112 [C_DC_PRF_RX_FLIT_CNT] =
4113         DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4114 [C_DC_PRF_TX_FLIT_CNT] =
4115         DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4116 [C_DC_PRF_CLK_CNTR] =
4117         DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4118 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4119         DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4120 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4121         DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4122                          CNTR_SYNTH),
4123 [C_DC_PG_STS_TX_SBE_CNT] =
4124         DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4125 [C_DC_PG_STS_TX_MBE_CNT] =
4126         DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4127                          CNTR_SYNTH),
4128 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4129                             access_sw_cpu_intr),
4130 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4131                             access_sw_cpu_rcv_limit),
4132 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4133                             access_sw_vtx_wait),
4134 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4135                             access_sw_pio_wait),
4136 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4137                             access_sw_pio_drain),
4138 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4139                             access_sw_kmem_wait),
4140 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4141                             access_sw_send_schedule),
4142 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4143                                       SEND_DMA_DESC_FETCHED_CNT, 0,
4144                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4145                                       dev_access_u32_csr),
4146 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4147                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4148                              access_sde_int_cnt),
4149 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4150                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4151                              access_sde_err_cnt),
4152 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4153                                   CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4154                                   access_sde_idle_int_cnt),
4155 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4156                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4157                                       access_sde_progress_int_cnt),
4158 /* MiscErrStatus */
4159 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4160                                 CNTR_NORMAL,
4161                                 access_misc_pll_lock_fail_err_cnt),
4162 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4163                                 CNTR_NORMAL,
4164                                 access_misc_mbist_fail_err_cnt),
4165 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4166                                 CNTR_NORMAL,
4167                                 access_misc_invalid_eep_cmd_err_cnt),
4168 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4169                                 CNTR_NORMAL,
4170                                 access_misc_efuse_done_parity_err_cnt),
4171 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4172                                 CNTR_NORMAL,
4173                                 access_misc_efuse_write_err_cnt),
4174 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4175                                 0, CNTR_NORMAL,
4176                                 access_misc_efuse_read_bad_addr_err_cnt),
4177 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4178                                 CNTR_NORMAL,
4179                                 access_misc_efuse_csr_parity_err_cnt),
4180 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4181                                 CNTR_NORMAL,
4182                                 access_misc_fw_auth_failed_err_cnt),
4183 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4184                                 CNTR_NORMAL,
4185                                 access_misc_key_mismatch_err_cnt),
4186 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4187                                 CNTR_NORMAL,
4188                                 access_misc_sbus_write_failed_err_cnt),
4189 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4190                                 CNTR_NORMAL,
4191                                 access_misc_csr_write_bad_addr_err_cnt),
4192 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4193                                 CNTR_NORMAL,
4194                                 access_misc_csr_read_bad_addr_err_cnt),
4195 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4196                                 CNTR_NORMAL,
4197                                 access_misc_csr_parity_err_cnt),
4198 /* CceErrStatus */
4199 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4200                                 CNTR_NORMAL,
4201                                 access_sw_cce_err_status_aggregated_cnt),
4202 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4203                                 CNTR_NORMAL,
4204                                 access_cce_msix_csr_parity_err_cnt),
4205 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4206                                 CNTR_NORMAL,
4207                                 access_cce_int_map_unc_err_cnt),
4208 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4209                                 CNTR_NORMAL,
4210                                 access_cce_int_map_cor_err_cnt),
4211 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4212                                 CNTR_NORMAL,
4213                                 access_cce_msix_table_unc_err_cnt),
4214 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4215                                 CNTR_NORMAL,
4216                                 access_cce_msix_table_cor_err_cnt),
4217 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4218                                 0, CNTR_NORMAL,
4219                                 access_cce_rxdma_conv_fifo_parity_err_cnt),
4220 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4221                                 0, CNTR_NORMAL,
4222                                 access_cce_rcpl_async_fifo_parity_err_cnt),
4223 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4224                                 CNTR_NORMAL,
4225                                 access_cce_seg_write_bad_addr_err_cnt),
4226 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4227                                 CNTR_NORMAL,
4228                                 access_cce_seg_read_bad_addr_err_cnt),
4229 [C_LA_TRIGGERED] = CNTR_ELEM("CceLATriggered", 0, 0,
4230                                 CNTR_NORMAL,
4231                                 access_la_triggered_cnt),
4232 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4233                                 CNTR_NORMAL,
4234                                 access_cce_trgt_cpl_timeout_err_cnt),
4235 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4236                                 CNTR_NORMAL,
4237                                 access_pcic_receive_parity_err_cnt),
4238 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4239                                 CNTR_NORMAL,
4240                                 access_pcic_transmit_back_parity_err_cnt),
4241 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4242                                 0, CNTR_NORMAL,
4243                                 access_pcic_transmit_front_parity_err_cnt),
4244 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4245                                 CNTR_NORMAL,
4246                                 access_pcic_cpl_dat_q_unc_err_cnt),
4247 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4248                                 CNTR_NORMAL,
4249                                 access_pcic_cpl_hd_q_unc_err_cnt),
4250 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4251                                 CNTR_NORMAL,
4252                                 access_pcic_post_dat_q_unc_err_cnt),
4253 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4254                                 CNTR_NORMAL,
4255                                 access_pcic_post_hd_q_unc_err_cnt),
4256 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4257                                 CNTR_NORMAL,
4258                                 access_pcic_retry_sot_mem_unc_err_cnt),
4259 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4260                                 CNTR_NORMAL,
4261                                 access_pcic_retry_mem_unc_err),
4262 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4263                                 CNTR_NORMAL,
4264                                 access_pcic_n_post_dat_q_parity_err_cnt),
4265 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4266                                 CNTR_NORMAL,
4267                                 access_pcic_n_post_h_q_parity_err_cnt),
4268 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4269                                 CNTR_NORMAL,
4270                                 access_pcic_cpl_dat_q_cor_err_cnt),
4271 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4272                                 CNTR_NORMAL,
4273                                 access_pcic_cpl_hd_q_cor_err_cnt),
4274 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4275                                 CNTR_NORMAL,
4276                                 access_pcic_post_dat_q_cor_err_cnt),
4277 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4278                                 CNTR_NORMAL,
4279                                 access_pcic_post_hd_q_cor_err_cnt),
4280 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4281                                 CNTR_NORMAL,
4282                                 access_pcic_retry_sot_mem_cor_err_cnt),
4283 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4284                                 CNTR_NORMAL,
4285                                 access_pcic_retry_mem_cor_err_cnt),
4286 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4287                                 "CceCli1AsyncFifoDbgParityErr", 0, 0,
4288                                 CNTR_NORMAL,
4289                                 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4290 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4291                                 "CceCli1AsyncFifoRxdmaParityErr", 0, 0,
4292                                 CNTR_NORMAL,
4293                                 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4294                                 ),
4295 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4296                         "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4297                         CNTR_NORMAL,
4298                         access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4299 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4300                         "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4301                         CNTR_NORMAL,
4302                         access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4303 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4304                         0, CNTR_NORMAL,
4305                         access_cce_cli2_async_fifo_parity_err_cnt),
4306 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4307                         CNTR_NORMAL,
4308                         access_cce_csr_cfg_bus_parity_err_cnt),
4309 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4310                         0, CNTR_NORMAL,
4311                         access_cce_cli0_async_fifo_parity_err_cnt),
4312 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4313                         CNTR_NORMAL,
4314                         access_cce_rspd_data_parity_err_cnt),
4315 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4316                         CNTR_NORMAL,
4317                         access_cce_trgt_access_err_cnt),
4318 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4319                         0, CNTR_NORMAL,
4320                         access_cce_trgt_async_fifo_parity_err_cnt),
4321 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4322                         CNTR_NORMAL,
4323                         access_cce_csr_write_bad_addr_err_cnt),
4324 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4325                         CNTR_NORMAL,
4326                         access_cce_csr_read_bad_addr_err_cnt),
4327 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4328                         CNTR_NORMAL,
4329                         access_ccs_csr_parity_err_cnt),
4330
4331 /* RcvErrStatus */
4332 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4333                         CNTR_NORMAL,
4334                         access_rx_csr_parity_err_cnt),
4335 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4336                         CNTR_NORMAL,
4337                         access_rx_csr_write_bad_addr_err_cnt),
4338 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4339                         CNTR_NORMAL,
4340                         access_rx_csr_read_bad_addr_err_cnt),
4341 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4342                         CNTR_NORMAL,
4343                         access_rx_dma_csr_unc_err_cnt),
4344 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4345                         CNTR_NORMAL,
4346                         access_rx_dma_dq_fsm_encoding_err_cnt),
4347 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4348                         CNTR_NORMAL,
4349                         access_rx_dma_eq_fsm_encoding_err_cnt),
4350 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4351                         CNTR_NORMAL,
4352                         access_rx_dma_csr_parity_err_cnt),
4353 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4354                         CNTR_NORMAL,
4355                         access_rx_rbuf_data_cor_err_cnt),
4356 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4357                         CNTR_NORMAL,
4358                         access_rx_rbuf_data_unc_err_cnt),
4359 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4360                         CNTR_NORMAL,
4361                         access_rx_dma_data_fifo_rd_cor_err_cnt),
4362 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4363                         CNTR_NORMAL,
4364                         access_rx_dma_data_fifo_rd_unc_err_cnt),
4365 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4366                         CNTR_NORMAL,
4367                         access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4368 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4369                         CNTR_NORMAL,
4370                         access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4371 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4372                         CNTR_NORMAL,
4373                         access_rx_rbuf_desc_part2_cor_err_cnt),
4374 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4375                         CNTR_NORMAL,
4376                         access_rx_rbuf_desc_part2_unc_err_cnt),
4377 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4378                         CNTR_NORMAL,
4379                         access_rx_rbuf_desc_part1_cor_err_cnt),
4380 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4381                         CNTR_NORMAL,
4382                         access_rx_rbuf_desc_part1_unc_err_cnt),
4383 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4384                         CNTR_NORMAL,
4385                         access_rx_hq_intr_fsm_err_cnt),
4386 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4387                         CNTR_NORMAL,
4388                         access_rx_hq_intr_csr_parity_err_cnt),
4389 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4390                         CNTR_NORMAL,
4391                         access_rx_lookup_csr_parity_err_cnt),
4392 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4393                         CNTR_NORMAL,
4394                         access_rx_lookup_rcv_array_cor_err_cnt),
4395 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4396                         CNTR_NORMAL,
4397                         access_rx_lookup_rcv_array_unc_err_cnt),
4398 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4399                         0, CNTR_NORMAL,
4400                         access_rx_lookup_des_part2_parity_err_cnt),
4401 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4402                         0, CNTR_NORMAL,
4403                         access_rx_lookup_des_part1_unc_cor_err_cnt),
4404 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4405                         CNTR_NORMAL,
4406                         access_rx_lookup_des_part1_unc_err_cnt),
4407 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4408                         CNTR_NORMAL,
4409                         access_rx_rbuf_next_free_buf_cor_err_cnt),
4410 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4411                         CNTR_NORMAL,
4412                         access_rx_rbuf_next_free_buf_unc_err_cnt),
4413 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4414                         "RxRbufFlInitWrAddrParityErr", 0, 0,
4415                         CNTR_NORMAL,
4416                         access_rbuf_fl_init_wr_addr_parity_err_cnt),
4417 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4418                         0, CNTR_NORMAL,
4419                         access_rx_rbuf_fl_initdone_parity_err_cnt),
4420 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4421                         0, CNTR_NORMAL,
4422                         access_rx_rbuf_fl_write_addr_parity_err_cnt),
4423 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4424                         CNTR_NORMAL,
4425                         access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4426 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4427                         CNTR_NORMAL,
4428                         access_rx_rbuf_empty_err_cnt),
4429 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4430                         CNTR_NORMAL,
4431                         access_rx_rbuf_full_err_cnt),
4432 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4433                         CNTR_NORMAL,
4434                         access_rbuf_bad_lookup_err_cnt),
4435 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4436                         CNTR_NORMAL,
4437                         access_rbuf_ctx_id_parity_err_cnt),
4438 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4439                         CNTR_NORMAL,
4440                         access_rbuf_csr_qeopdw_parity_err_cnt),
4441 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4442                         "RxRbufCsrQNumOfPktParityErr", 0, 0,
4443                         CNTR_NORMAL,
4444                         access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4445 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4446                         "RxRbufCsrQTlPtrParityErr", 0, 0,
4447                         CNTR_NORMAL,
4448                         access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4449 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4450                         0, CNTR_NORMAL,
4451                         access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4452 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4453                         0, CNTR_NORMAL,
4454                         access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4455 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4456                         0, 0, CNTR_NORMAL,
4457                         access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4458 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4459                         0, CNTR_NORMAL,
4460                         access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4461 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4462                         "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4463                         CNTR_NORMAL,
4464                         access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4465 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4466                         0, CNTR_NORMAL,
4467                         access_rx_rbuf_block_list_read_cor_err_cnt),
4468 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4469                         0, CNTR_NORMAL,
4470                         access_rx_rbuf_block_list_read_unc_err_cnt),
4471 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4472                         CNTR_NORMAL,
4473                         access_rx_rbuf_lookup_des_cor_err_cnt),
4474 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4475                         CNTR_NORMAL,
4476                         access_rx_rbuf_lookup_des_unc_err_cnt),
4477 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4478                         "RxRbufLookupDesRegUncCorErr", 0, 0,
4479                         CNTR_NORMAL,
4480                         access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4481 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4482                         CNTR_NORMAL,
4483                         access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4484 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4485                         CNTR_NORMAL,
4486                         access_rx_rbuf_free_list_cor_err_cnt),
4487 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4488                         CNTR_NORMAL,
4489                         access_rx_rbuf_free_list_unc_err_cnt),
4490 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4491                         CNTR_NORMAL,
4492                         access_rx_rcv_fsm_encoding_err_cnt),
4493 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4494                         CNTR_NORMAL,
4495                         access_rx_dma_flag_cor_err_cnt),
4496 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4497                         CNTR_NORMAL,
4498                         access_rx_dma_flag_unc_err_cnt),
4499 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4500                         CNTR_NORMAL,
4501                         access_rx_dc_sop_eop_parity_err_cnt),
4502 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4503                         CNTR_NORMAL,
4504                         access_rx_rcv_csr_parity_err_cnt),
4505 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4506                         CNTR_NORMAL,
4507                         access_rx_rcv_qp_map_table_cor_err_cnt),
4508 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4509                         CNTR_NORMAL,
4510                         access_rx_rcv_qp_map_table_unc_err_cnt),
4511 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4512                         CNTR_NORMAL,
4513                         access_rx_rcv_data_cor_err_cnt),
4514 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4515                         CNTR_NORMAL,
4516                         access_rx_rcv_data_unc_err_cnt),
4517 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4518                         CNTR_NORMAL,
4519                         access_rx_rcv_hdr_cor_err_cnt),
4520 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4521                         CNTR_NORMAL,
4522                         access_rx_rcv_hdr_unc_err_cnt),
4523 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4524                         CNTR_NORMAL,
4525                         access_rx_dc_intf_parity_err_cnt),
4526 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4527                         CNTR_NORMAL,
4528                         access_rx_dma_csr_cor_err_cnt),
4529 /* SendPioErrStatus */
4530 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4531                         CNTR_NORMAL,
4532                         access_pio_pec_sop_head_parity_err_cnt),
4533 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4534                         CNTR_NORMAL,
4535                         access_pio_pcc_sop_head_parity_err_cnt),
4536 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4537                         0, 0, CNTR_NORMAL,
4538                         access_pio_last_returned_cnt_parity_err_cnt),
4539 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4540                         0, CNTR_NORMAL,
4541                         access_pio_current_free_cnt_parity_err_cnt),
4542 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4543                         CNTR_NORMAL,
4544                         access_pio_reserved_31_err_cnt),
4545 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4546                         CNTR_NORMAL,
4547                         access_pio_reserved_30_err_cnt),
4548 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4549                         CNTR_NORMAL,
4550                         access_pio_ppmc_sop_len_err_cnt),
4551 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4552                         CNTR_NORMAL,
4553                         access_pio_ppmc_bqc_mem_parity_err_cnt),
4554 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4555                         CNTR_NORMAL,
4556                         access_pio_vl_fifo_parity_err_cnt),
4557 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4558                         CNTR_NORMAL,
4559                         access_pio_vlf_sop_parity_err_cnt),
4560 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4561                         CNTR_NORMAL,
4562                         access_pio_vlf_v1_len_parity_err_cnt),
4563 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4564                         CNTR_NORMAL,
4565                         access_pio_block_qw_count_parity_err_cnt),
4566 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4567                         CNTR_NORMAL,
4568                         access_pio_write_qw_valid_parity_err_cnt),
4569 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4570                         CNTR_NORMAL,
4571                         access_pio_state_machine_err_cnt),
4572 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4573                         CNTR_NORMAL,
4574                         access_pio_write_data_parity_err_cnt),
4575 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4576                         CNTR_NORMAL,
4577                         access_pio_host_addr_mem_cor_err_cnt),
4578 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4579                         CNTR_NORMAL,
4580                         access_pio_host_addr_mem_unc_err_cnt),
4581 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4582                         CNTR_NORMAL,
4583                         access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4584 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4585                         CNTR_NORMAL,
4586                         access_pio_init_sm_in_err_cnt),
4587 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4588                         CNTR_NORMAL,
4589                         access_pio_ppmc_pbl_fifo_err_cnt),
4590 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4591                         0, CNTR_NORMAL,
4592                         access_pio_credit_ret_fifo_parity_err_cnt),
4593 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4594                         CNTR_NORMAL,
4595                         access_pio_v1_len_mem_bank1_cor_err_cnt),
4596 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4597                         CNTR_NORMAL,
4598                         access_pio_v1_len_mem_bank0_cor_err_cnt),
4599 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4600                         CNTR_NORMAL,
4601                         access_pio_v1_len_mem_bank1_unc_err_cnt),
4602 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4603                         CNTR_NORMAL,
4604                         access_pio_v1_len_mem_bank0_unc_err_cnt),
4605 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4606                         CNTR_NORMAL,
4607                         access_pio_sm_pkt_reset_parity_err_cnt),
4608 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4609                         CNTR_NORMAL,
4610                         access_pio_pkt_evict_fifo_parity_err_cnt),
4611 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4612                         "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4613                         CNTR_NORMAL,
4614                         access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4615 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4616                         CNTR_NORMAL,
4617                         access_pio_sbrdctl_crrel_parity_err_cnt),
4618 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4619                         CNTR_NORMAL,
4620                         access_pio_pec_fifo_parity_err_cnt),
4621 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4622                         CNTR_NORMAL,
4623                         access_pio_pcc_fifo_parity_err_cnt),
4624 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4625                         CNTR_NORMAL,
4626                         access_pio_sb_mem_fifo1_err_cnt),
4627 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4628                         CNTR_NORMAL,
4629                         access_pio_sb_mem_fifo0_err_cnt),
4630 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4631                         CNTR_NORMAL,
4632                         access_pio_csr_parity_err_cnt),
4633 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4634                         CNTR_NORMAL,
4635                         access_pio_write_addr_parity_err_cnt),
4636 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4637                         CNTR_NORMAL,
4638                         access_pio_write_bad_ctxt_err_cnt),
4639 /* SendDmaErrStatus */
4640 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4641                         0, CNTR_NORMAL,
4642                         access_sdma_pcie_req_tracking_cor_err_cnt),
4643 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4644                         0, CNTR_NORMAL,
4645                         access_sdma_pcie_req_tracking_unc_err_cnt),
4646 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4647                         CNTR_NORMAL,
4648                         access_sdma_csr_parity_err_cnt),
4649 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4650                         CNTR_NORMAL,
4651                         access_sdma_rpy_tag_err_cnt),
4652 /* SendEgressErrStatus */
4653 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4654                         CNTR_NORMAL,
4655                         access_tx_read_pio_memory_csr_unc_err_cnt),
4656 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4657                         0, CNTR_NORMAL,
4658                         access_tx_read_sdma_memory_csr_err_cnt),
4659 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4660                         CNTR_NORMAL,
4661                         access_tx_egress_fifo_cor_err_cnt),
4662 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4663                         CNTR_NORMAL,
4664                         access_tx_read_pio_memory_cor_err_cnt),
4665 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4666                         CNTR_NORMAL,
4667                         access_tx_read_sdma_memory_cor_err_cnt),
4668 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4669                         CNTR_NORMAL,
4670                         access_tx_sb_hdr_cor_err_cnt),
4671 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4672                         CNTR_NORMAL,
4673                         access_tx_credit_overrun_err_cnt),
4674 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4675                         CNTR_NORMAL,
4676                         access_tx_launch_fifo8_cor_err_cnt),
4677 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4678                         CNTR_NORMAL,
4679                         access_tx_launch_fifo7_cor_err_cnt),
4680 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4681                         CNTR_NORMAL,
4682                         access_tx_launch_fifo6_cor_err_cnt),
4683 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4684                         CNTR_NORMAL,
4685                         access_tx_launch_fifo5_cor_err_cnt),
4686 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4687                         CNTR_NORMAL,
4688                         access_tx_launch_fifo4_cor_err_cnt),
4689 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4690                         CNTR_NORMAL,
4691                         access_tx_launch_fifo3_cor_err_cnt),
4692 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4693                         CNTR_NORMAL,
4694                         access_tx_launch_fifo2_cor_err_cnt),
4695 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4696                         CNTR_NORMAL,
4697                         access_tx_launch_fifo1_cor_err_cnt),
4698 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4699                         CNTR_NORMAL,
4700                         access_tx_launch_fifo0_cor_err_cnt),
4701 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4702                         CNTR_NORMAL,
4703                         access_tx_credit_return_vl_err_cnt),
4704 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4705                         CNTR_NORMAL,
4706                         access_tx_hcrc_insertion_err_cnt),
4707 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4708                         CNTR_NORMAL,
4709                         access_tx_egress_fifo_unc_err_cnt),
4710 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4711                         CNTR_NORMAL,
4712                         access_tx_read_pio_memory_unc_err_cnt),
4713 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4714                         CNTR_NORMAL,
4715                         access_tx_read_sdma_memory_unc_err_cnt),
4716 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4717                         CNTR_NORMAL,
4718                         access_tx_sb_hdr_unc_err_cnt),
4719 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4720                         CNTR_NORMAL,
4721                         access_tx_credit_return_partiy_err_cnt),
4722 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4723                         0, 0, CNTR_NORMAL,
4724                         access_tx_launch_fifo8_unc_or_parity_err_cnt),
4725 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4726                         0, 0, CNTR_NORMAL,
4727                         access_tx_launch_fifo7_unc_or_parity_err_cnt),
4728 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4729                         0, 0, CNTR_NORMAL,
4730                         access_tx_launch_fifo6_unc_or_parity_err_cnt),
4731 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4732                         0, 0, CNTR_NORMAL,
4733                         access_tx_launch_fifo5_unc_or_parity_err_cnt),
4734 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4735                         0, 0, CNTR_NORMAL,
4736                         access_tx_launch_fifo4_unc_or_parity_err_cnt),
4737 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4738                         0, 0, CNTR_NORMAL,
4739                         access_tx_launch_fifo3_unc_or_parity_err_cnt),
4740 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4741                         0, 0, CNTR_NORMAL,
4742                         access_tx_launch_fifo2_unc_or_parity_err_cnt),
4743 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4744                         0, 0, CNTR_NORMAL,
4745                         access_tx_launch_fifo1_unc_or_parity_err_cnt),
4746 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4747                         0, 0, CNTR_NORMAL,
4748                         access_tx_launch_fifo0_unc_or_parity_err_cnt),
4749 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4750                         0, 0, CNTR_NORMAL,
4751                         access_tx_sdma15_disallowed_packet_err_cnt),
4752 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4753                         0, 0, CNTR_NORMAL,
4754                         access_tx_sdma14_disallowed_packet_err_cnt),
4755 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4756                         0, 0, CNTR_NORMAL,
4757                         access_tx_sdma13_disallowed_packet_err_cnt),
4758 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4759                         0, 0, CNTR_NORMAL,
4760                         access_tx_sdma12_disallowed_packet_err_cnt),
4761 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4762                         0, 0, CNTR_NORMAL,
4763                         access_tx_sdma11_disallowed_packet_err_cnt),
4764 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4765                         0, 0, CNTR_NORMAL,
4766                         access_tx_sdma10_disallowed_packet_err_cnt),
4767 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4768                         0, 0, CNTR_NORMAL,
4769                         access_tx_sdma9_disallowed_packet_err_cnt),
4770 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4771                         0, 0, CNTR_NORMAL,
4772                         access_tx_sdma8_disallowed_packet_err_cnt),
4773 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4774                         0, 0, CNTR_NORMAL,
4775                         access_tx_sdma7_disallowed_packet_err_cnt),
4776 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4777                         0, 0, CNTR_NORMAL,
4778                         access_tx_sdma6_disallowed_packet_err_cnt),
4779 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4780                         0, 0, CNTR_NORMAL,
4781                         access_tx_sdma5_disallowed_packet_err_cnt),
4782 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4783                         0, 0, CNTR_NORMAL,
4784                         access_tx_sdma4_disallowed_packet_err_cnt),
4785 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4786                         0, 0, CNTR_NORMAL,
4787                         access_tx_sdma3_disallowed_packet_err_cnt),
4788 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4789                         0, 0, CNTR_NORMAL,
4790                         access_tx_sdma2_disallowed_packet_err_cnt),
4791 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4792                         0, 0, CNTR_NORMAL,
4793                         access_tx_sdma1_disallowed_packet_err_cnt),
4794 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4795                         0, 0, CNTR_NORMAL,
4796                         access_tx_sdma0_disallowed_packet_err_cnt),
4797 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4798                         CNTR_NORMAL,
4799                         access_tx_config_parity_err_cnt),
4800 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4801                         CNTR_NORMAL,
4802                         access_tx_sbrd_ctl_csr_parity_err_cnt),
4803 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4804                         CNTR_NORMAL,
4805                         access_tx_launch_csr_parity_err_cnt),
4806 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4807                         CNTR_NORMAL,
4808                         access_tx_illegal_vl_err_cnt),
4809 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4810                         "TxSbrdCtlStateMachineParityErr", 0, 0,
4811                         CNTR_NORMAL,
4812                         access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4813 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4814                         CNTR_NORMAL,
4815                         access_egress_reserved_10_err_cnt),
4816 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4817                         CNTR_NORMAL,
4818                         access_egress_reserved_9_err_cnt),
4819 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4820                         0, 0, CNTR_NORMAL,
4821                         access_tx_sdma_launch_intf_parity_err_cnt),
4822 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4823                         CNTR_NORMAL,
4824                         access_tx_pio_launch_intf_parity_err_cnt),
4825 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4826                         CNTR_NORMAL,
4827                         access_egress_reserved_6_err_cnt),
4828 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4829                         CNTR_NORMAL,
4830                         access_tx_incorrect_link_state_err_cnt),
4831 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4832                         CNTR_NORMAL,
4833                         access_tx_linkdown_err_cnt),
4834 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4835                         "EgressFifoUnderrunOrParityErr", 0, 0,
4836                         CNTR_NORMAL,
4837                         access_tx_egress_fifi_underrun_or_parity_err_cnt),
4838 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4839                         CNTR_NORMAL,
4840                         access_egress_reserved_2_err_cnt),
4841 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4842                         CNTR_NORMAL,
4843                         access_tx_pkt_integrity_mem_unc_err_cnt),
4844 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4845                         CNTR_NORMAL,
4846                         access_tx_pkt_integrity_mem_cor_err_cnt),
4847 /* SendErrStatus */
4848 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4849                         CNTR_NORMAL,
4850                         access_send_csr_write_bad_addr_err_cnt),
4851 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4852                         CNTR_NORMAL,
4853                         access_send_csr_read_bad_addr_err_cnt),
4854 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4855                         CNTR_NORMAL,
4856                         access_send_csr_parity_cnt),
4857 /* SendCtxtErrStatus */
4858 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4859                         CNTR_NORMAL,
4860                         access_pio_write_out_of_bounds_err_cnt),
4861 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4862                         CNTR_NORMAL,
4863                         access_pio_write_overflow_err_cnt),
4864 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4865                         0, 0, CNTR_NORMAL,
4866                         access_pio_write_crosses_boundary_err_cnt),
4867 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4868                         CNTR_NORMAL,
4869                         access_pio_disallowed_packet_err_cnt),
4870 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4871                         CNTR_NORMAL,
4872                         access_pio_inconsistent_sop_err_cnt),
4873 /* SendDmaEngErrStatus */
4874 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4875                         0, 0, CNTR_NORMAL,
4876                         access_sdma_header_request_fifo_cor_err_cnt),
4877 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4878                         CNTR_NORMAL,
4879                         access_sdma_header_storage_cor_err_cnt),
4880 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
4881                         CNTR_NORMAL,
4882                         access_sdma_packet_tracking_cor_err_cnt),
4883 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
4884                         CNTR_NORMAL,
4885                         access_sdma_assembly_cor_err_cnt),
4886 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
4887                         CNTR_NORMAL,
4888                         access_sdma_desc_table_cor_err_cnt),
4889 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
4890                         0, 0, CNTR_NORMAL,
4891                         access_sdma_header_request_fifo_unc_err_cnt),
4892 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
4893                         CNTR_NORMAL,
4894                         access_sdma_header_storage_unc_err_cnt),
4895 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
4896                         CNTR_NORMAL,
4897                         access_sdma_packet_tracking_unc_err_cnt),
4898 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
4899                         CNTR_NORMAL,
4900                         access_sdma_assembly_unc_err_cnt),
4901 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
4902                         CNTR_NORMAL,
4903                         access_sdma_desc_table_unc_err_cnt),
4904 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
4905                         CNTR_NORMAL,
4906                         access_sdma_timeout_err_cnt),
4907 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
4908                         CNTR_NORMAL,
4909                         access_sdma_header_length_err_cnt),
4910 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
4911                         CNTR_NORMAL,
4912                         access_sdma_header_address_err_cnt),
4913 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
4914                         CNTR_NORMAL,
4915                         access_sdma_header_select_err_cnt),
4916 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
4917                         CNTR_NORMAL,
4918                         access_sdma_reserved_9_err_cnt),
4919 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
4920                         CNTR_NORMAL,
4921                         access_sdma_packet_desc_overflow_err_cnt),
4922 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
4923                         CNTR_NORMAL,
4924                         access_sdma_length_mismatch_err_cnt),
4925 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
4926                         CNTR_NORMAL,
4927                         access_sdma_halt_err_cnt),
4928 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
4929                         CNTR_NORMAL,
4930                         access_sdma_mem_read_err_cnt),
4931 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
4932                         CNTR_NORMAL,
4933                         access_sdma_first_desc_err_cnt),
4934 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
4935                         CNTR_NORMAL,
4936                         access_sdma_tail_out_of_bounds_err_cnt),
4937 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
4938                         CNTR_NORMAL,
4939                         access_sdma_too_long_err_cnt),
4940 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
4941                         CNTR_NORMAL,
4942                         access_sdma_gen_mismatch_err_cnt),
4943 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
4944                         CNTR_NORMAL,
4945                         access_sdma_wrong_dw_err_cnt),
4946 };
4947
4948 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
4949 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
4950                         CNTR_NORMAL),
4951 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
4952                         CNTR_NORMAL),
4953 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
4954                         CNTR_NORMAL),
4955 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
4956                         CNTR_NORMAL),
4957 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
4958                         CNTR_NORMAL),
4959 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
4960                         CNTR_NORMAL),
4961 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
4962                         CNTR_NORMAL),
4963 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
4964 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
4965 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
4966 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
4967                                       CNTR_SYNTH | CNTR_VL),
4968 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
4969                                      CNTR_SYNTH | CNTR_VL),
4970 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
4971                                       CNTR_SYNTH | CNTR_VL),
4972 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
4973 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
4974 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4975                              access_sw_link_dn_cnt),
4976 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4977                            access_sw_link_up_cnt),
4978 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
4979                                  access_sw_unknown_frame_cnt),
4980 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
4981                              access_sw_xmit_discards),
4982 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
4983                                 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
4984                                 access_sw_xmit_discards),
4985 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
4986                                  access_xmit_constraint_errs),
4987 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
4988                                 access_rcv_constraint_errs),
4989 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
4990 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
4991 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
4992 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
4993 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
4994 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
4995 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
4996 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
4997 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
4998 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
4999 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5000 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5001 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5002                                access_sw_cpu_rc_acks),
5003 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5004                                 access_sw_cpu_rc_qacks),
5005 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5006                                        access_sw_cpu_rc_delayed_comp),
5007 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5008 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5009 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5010 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5011 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5012 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5013 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5014 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5015 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5016 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5017 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5018 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5019 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5020 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5021 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5022 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5023 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5024 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5025 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5026 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5027 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5028 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5029 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5030 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5031 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5032 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5033 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5034 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5035 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5036 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5037 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5038 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5039 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5040 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5041 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5042 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5043 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5044 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5045 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5046 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5047 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5048 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5049 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5050 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5051 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5052 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5053 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5054 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5055 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5056 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5057 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5058 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5059 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5060 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5061 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5062 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5063 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5064 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5065 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5066 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5067 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5068 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5069 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5070 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5071 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5072 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5073 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5074 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5075 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5076 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5077 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5078 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5079 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5080 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5081 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5082 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5083 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5084 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5085 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5086 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5087 };
5088
5089 /* ======================================================================== */
5090
5091 /* return true if this is chip revision A */
5092 int is_ax(struct hfi1_devdata *dd)
5093 {
5094         u8 chip_rev_minor =
5095                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5096                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5097         return (chip_rev_minor & 0xf0) == 0;
5098 }
5099
5100 /* return true if this is chip revision B */
5101 int is_bx(struct hfi1_devdata *dd)
5102 {
5103         u8 chip_rev_minor =
5104                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5105                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5106         return (chip_rev_minor & 0xf0) == 0x10;
5107 }
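
/*
 * Illustration, derived from the two checks above rather than any spec
 * text: the high nibble of the minor chip revision selects the silicon
 * step, so CCE_REVISION minor values 0x00-0x0f read as A-step (is_ax()
 * true) and 0x10-0x1f as B-step (is_bx() true).
 */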
5108
5109 /*
5110  * Append string s to buffer buf.  Arguments curp and lenp are the current
5111  * position and remaining length, respectively.
5112  *
5113  * Return 0 on success, 1 if out of room.
5114  */
5115 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5116 {
5117         char *p = *curp;
5118         int len = *lenp;
5119         int result = 0; /* success */
5120         char c;
5121
5122         /* add a comma before all but the first entry in the buffer */
5123         if (p != buf) {
5124                 if (len == 0) {
5125                         result = 1; /* out of room */
5126                         goto done;
5127                 }
5128                 *p++ = ',';
5129                 len--;
5130         }
5131
5132         /* copy the string */
5133         while ((c = *s++) != 0) {
5134                 if (len == 0) {
5135                         result = 1; /* out of room */
5136                         goto done;
5137                 }
5138                 *p++ = c;
5139                 len--;
5140         }
5141
5142 done:
5143         /* write return values */
5144         *curp = p;
5145         *lenp = len;
5146
5147         return result;
5148 }
5149
5150 /*
5151  * Using the given flag table, print a comma separated string into
5152  * the buffer.  End in '*' if the buffer is too short.
5153  */
5154 static char *flag_string(char *buf, int buf_len, u64 flags,
5155                          struct flag_table *table, int table_size)
5156 {
5157         char extra[32];
5158         char *p = buf;
5159         int len = buf_len;
5160         int no_room = 0;
5161         int i;
5162
5163         /* make sure there are at least 2 bytes so we can form "*" */
5164         if (len < 2)
5165                 return "";
5166
5167         len--;  /* leave room for a nul */
5168         for (i = 0; i < table_size; i++) {
5169                 if (flags & table[i].flag) {
5170                         no_room = append_str(buf, &p, &len, table[i].str);
5171                         if (no_room)
5172                                 break;
5173                         flags &= ~table[i].flag;
5174                 }
5175         }
5176
5177         /* any undocumented bits left? */
5178         if (!no_room && flags) {
5179                 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5180                 no_room = append_str(buf, &p, &len, extra);
5181         }
5182
5183         /* add '*' if we ran out of room */
5184         if (no_room) {
5185                 /* may need to back up to add space for a '*' */
5186                 if (len == 0)
5187                         --p;
5188                 *p++ = '*';
5189         }
5190
5191         /* add final nul - space already allocated above */
5192         *p = 0;
5193         return buf;
5194 }
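
/*
 * Example of the output format, using a hypothetical flag table just for
 * illustration: with entries { 0x1, "ErrA" } and { 0x2, "ErrB" }, flags
 * of 0x7 format as "ErrA,ErrB,bits 0x4" - named bits first, then any
 * undocumented remainder - and the string ends in '*' when the buffer
 * is too short to hold everything.
 */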
5195
5196 /* first 8 CCE error interrupt source names */
5197 static const char * const cce_misc_names[] = {
5198         "CceErrInt",            /* 0 */
5199         "RxeErrInt",            /* 1 */
5200         "MiscErrInt",           /* 2 */
5201         "Reserved3",            /* 3 */
5202         "PioErrInt",            /* 4 */
5203         "SDmaErrInt",           /* 5 */
5204         "EgressErrInt",         /* 6 */
5205         "TxeErrInt"             /* 7 */
5206 };
5207
5208 /*
5209  * Return the miscellaneous error interrupt name.
5210  */
5211 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5212 {
5213         if (source < ARRAY_SIZE(cce_misc_names))
5214                 strncpy(buf, cce_misc_names[source], bsize);
5215         else
5216                 snprintf(buf, bsize, "Reserved%u",
5217                          source + IS_GENERAL_ERR_START);
5218
5219         return buf;
5220 }
5221
5222 /*
5223  * Return the SDMA engine error interrupt name.
5224  */
5225 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5226 {
5227         snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5228         return buf;
5229 }
5230
5231 /*
5232  * Return the send context error interrupt name.
5233  */
5234 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5235 {
5236         snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5237         return buf;
5238 }
5239
5240 static const char * const various_names[] = {
5241         "PbcInt",
5242         "GpioAssertInt",
5243         "Qsfp1Int",
5244         "Qsfp2Int",
5245         "TCritInt"
5246 };
5247
5248 /*
5249  * Return the various interrupt name.
5250  */
5251 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5252 {
5253         if (source < ARRAY_SIZE(various_names))
5254                 strncpy(buf, various_names[source], bsize);
5255         else
5256                 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5257         return buf;
5258 }
5259
5260 /*
5261  * Return the DC interrupt name.
5262  */
5263 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5264 {
5265         static const char * const dc_int_names[] = {
5266                 "common",
5267                 "lcb",
5268                 "8051",
5269                 "lbm"   /* local block merge */
5270         };
5271
5272         if (source < ARRAY_SIZE(dc_int_names))
5273                 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5274         else
5275                 snprintf(buf, bsize, "DCInt%u", source);
5276         return buf;
5277 }
5278
5279 static const char * const sdma_int_names[] = {
5280         "SDmaInt",
5281         "SdmaIdleInt",
5282         "SdmaProgressInt",
5283 };
5284
5285 /*
5286  * Return the SDMA engine interrupt name.
5287  */
5288 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5289 {
5290         /* what interrupt */
5291         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5292         /* which engine */
5293         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5294
5295         if (likely(what < ARRAY_SIZE(sdma_int_names)))
5296                 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5297         else
5298                 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5299         return buf;
5300 }
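
/*
 * Worked example, assuming TXE_NUM_SDMA_ENGINES is 16: source 17 decodes
 * to what == 1 and which == 1, i.e. "SdmaIdleInt1".  Any source that
 * decodes to what >= 3 falls into the "Invalid SDMA interrupt" case.
 */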
5301
5302 /*
5303  * Return the receive available interrupt name.
5304  */
5305 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5306 {
5307         snprintf(buf, bsize, "RcvAvailInt%u", source);
5308         return buf;
5309 }
5310
5311 /*
5312  * Return the receive urgent interrupt name.
5313  */
5314 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5315 {
5316         snprintf(buf, bsize, "RcvUrgentInt%u", source);
5317         return buf;
5318 }
5319
5320 /*
5321  * Return the send credit interrupt name.
5322  */
5323 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5324 {
5325         snprintf(buf, bsize, "SendCreditInt%u", source);
5326         return buf;
5327 }
5328
5329 /*
5330  * Return the reserved interrupt name.
5331  */
5332 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5333 {
5334         snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5335         return buf;
5336 }
5337
5338 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5339 {
5340         return flag_string(buf, buf_len, flags,
5341                            cce_err_status_flags,
5342                            ARRAY_SIZE(cce_err_status_flags));
5343 }
5344
5345 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5346 {
5347         return flag_string(buf, buf_len, flags,
5348                            rxe_err_status_flags,
5349                            ARRAY_SIZE(rxe_err_status_flags));
5350 }
5351
5352 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5353 {
5354         return flag_string(buf, buf_len, flags, misc_err_status_flags,
5355                            ARRAY_SIZE(misc_err_status_flags));
5356 }
5357
5358 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5359 {
5360         return flag_string(buf, buf_len, flags,
5361                            pio_err_status_flags,
5362                            ARRAY_SIZE(pio_err_status_flags));
5363 }
5364
5365 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5366 {
5367         return flag_string(buf, buf_len, flags,
5368                            sdma_err_status_flags,
5369                            ARRAY_SIZE(sdma_err_status_flags));
5370 }
5371
5372 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5373 {
5374         return flag_string(buf, buf_len, flags,
5375                            egress_err_status_flags,
5376                            ARRAY_SIZE(egress_err_status_flags));
5377 }
5378
5379 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5380 {
5381         return flag_string(buf, buf_len, flags,
5382                            egress_err_info_flags,
5383                            ARRAY_SIZE(egress_err_info_flags));
5384 }
5385
5386 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5387 {
5388         return flag_string(buf, buf_len, flags,
5389                            send_err_status_flags,
5390                            ARRAY_SIZE(send_err_status_flags));
5391 }
5392
5393 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5394 {
5395         char buf[96];
5396         int i = 0;
5397
5398         /*
5399          * For most these errors, there is nothing that can be done except
5400          * report or record it.
5401          */
5402         dd_dev_info(dd, "CCE Error: %s\n",
5403                     cce_err_status_string(buf, sizeof(buf), reg));
5404
5405         if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5406             is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5407                 /* this error requires a manual drop into SPC freeze mode */
5408                 /* then a fix up */
5409                 start_freeze_handling(dd->pport, FREEZE_SELF);
5410         }
5411
5412         for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5413                 if (reg & (1ull << i)) {
5414                         incr_cntr64(&dd->cce_err_status_cnt[i]);
5415                         /* maintain a counter over all cce_err_status errors */
5416                         incr_cntr64(&dd->sw_cce_err_status_aggregate);
5417                 }
5418         }
5419 }
5420
5421 /*
5422  * Check counters for receive errors that do not have an interrupt
5423  * associated with them.
5424  */
5425 #define RCVERR_CHECK_TIME 10
5426 static void update_rcverr_timer(unsigned long opaque)
5427 {
5428         struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
5429         struct hfi1_pportdata *ppd = dd->pport;
5430         u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5431
5432         if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5433             ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5434                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5435                 set_link_down_reason(
5436                         ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN,
5437                         0, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5438                 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
5439         }
5440         dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5441
5442         mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5443 }
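
/*
 * Note on the timer arithmetic above: jiffies + HZ * RCVERR_CHECK_TIME
 * re-arms the check every 10 seconds, and a bounce is requested only
 * when the overflow count grew since the last pass and PortErrorAction
 * enables it.
 */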
5444
5445 static int init_rcverr(struct hfi1_devdata *dd)
5446 {
5447         setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
5448         /* Assume the hardware counter has been reset */
5449         dd->rcv_ovfl_cnt = 0;
5450         return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5451 }
5452
5453 static void free_rcverr(struct hfi1_devdata *dd)
5454 {
5455         if (dd->rcverr_timer.data)
5456                 del_timer_sync(&dd->rcverr_timer);
5457         dd->rcverr_timer.data = 0;
5458 }
5459
5460 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5461 {
5462         char buf[96];
5463         int i = 0;
5464
5465         dd_dev_info(dd, "Receive Error: %s\n",
5466                     rxe_err_status_string(buf, sizeof(buf), reg));
5467
5468         if (reg & ALL_RXE_FREEZE_ERR) {
5469                 int flags = 0;
5470
5471                 /*
5472                  * Freeze mode recovery is disabled for the errors
5473                  * in RXE_FREEZE_ABORT_MASK
5474                  */
5475                 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5476                         flags = FREEZE_ABORT;
5477
5478                 start_freeze_handling(dd->pport, flags);
5479         }
5480
5481         for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5482                 if (reg & (1ull << i))
5483                         incr_cntr64(&dd->rcv_err_status_cnt[i]);
5484         }
5485 }
5486
5487 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5488 {
5489         char buf[96];
5490         int i = 0;
5491
5492         dd_dev_info(dd, "Misc Error: %s",
5493                     misc_err_status_string(buf, sizeof(buf), reg));
5494         for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5495                 if (reg & (1ull << i))
5496                         incr_cntr64(&dd->misc_err_status_cnt[i]);
5497         }
5498 }
5499
5500 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5501 {
5502         char buf[96];
5503         int i = 0;
5504
5505         dd_dev_info(dd, "PIO Error: %s\n",
5506                     pio_err_status_string(buf, sizeof(buf), reg));
5507
5508         if (reg & ALL_PIO_FREEZE_ERR)
5509                 start_freeze_handling(dd->pport, 0);
5510
5511         for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5512                 if (reg & (1ull << i))
5513                         incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5514         }
5515 }
5516
5517 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5518 {
5519         char buf[96];
5520         int i = 0;
5521
5522         dd_dev_info(dd, "SDMA Error: %s\n",
5523                     sdma_err_status_string(buf, sizeof(buf), reg));
5524
5525         if (reg & ALL_SDMA_FREEZE_ERR)
5526                 start_freeze_handling(dd->pport, 0);
5527
5528         for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5529                 if (reg & (1ull << i))
5530                         incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5531         }
5532 }
5533
5534 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5535 {
5536         incr_cntr64(&ppd->port_xmit_discards);
5537 }
5538
5539 static void count_port_inactive(struct hfi1_devdata *dd)
5540 {
5541         __count_port_discards(dd->pport);
5542 }
5543
5544 /*
5545  * We have had a "disallowed packet" error during egress. Determine the
5546  * integrity check that failed, and update the relevant error counters.
5547  *
5548  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5549  * bit of state per integrity check, so we can miss the reason for an
5550  * egress error when more than one packet fails the same integrity
5551  * check, since we clear the corresponding bit after each read.
5552  */
5553 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5554                                         int vl)
5555 {
5556         struct hfi1_pportdata *ppd = dd->pport;
5557         u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5558         u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5559         char buf[96];
5560
5561         /* clear down all observed info as quickly as possible after read */
5562         write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5563
5564         dd_dev_info(dd,
5565                     "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5566                     info, egress_err_info_string(buf, sizeof(buf), info), src);
5567
5568         /* Eventually add other counters for each bit */
5569         if (info & PORT_DISCARD_EGRESS_ERRS) {
5570                 int weight, i;
5571
5572                 /*
5573                  * Count all applicable bits as individual errors and
5574                  * attribute them to the packet that triggered this handler.
5575                  * This may not be completely accurate due to limitations
5576                  * on the available hardware error information.  There is
5577                  * a single information register and any number of error
5578                  * packets may have occurred and contributed to it before
5579                  * this routine is called.  This means that:
5580                  * a) If multiple packets with the same error occur before
5581                  *    this routine is called, earlier packets are missed.
5582                  *    There is only a single bit for each error type.
5583                  * b) Errors may not be attributed to the correct VL.
5584                  *    The driver is attributing all bits in the info register
5585                  *    to the packet that triggered this call, but bits
5586                  *    could be an accumulation of different packets with
5587                  *    different VLs.
5588                  * c) A single error packet may have multiple counts attached
5589                  *    to it.  There is no way for the driver to know if
5590                  *    multiple bits set in the info register are due to a
5591                  *    single packet or multiple packets.  The driver assumes
5592                  *    multiple packets.
5593                  */
5594                 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5595                 for (i = 0; i < weight; i++) {
5596                         __count_port_discards(ppd);
5597                         if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5598                                 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5599                         else if (vl == 15)
5600                                 incr_cntr64(
5601                                         &ppd->port_xmit_discards_vl[C_VL_15]);
5602                 }
5603         }
5604 }
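
/*
 * Worked example of the weighting above: if info has three discard bits
 * set and the caller passed vl == 2, then port_xmit_discards and
 * port_xmit_discards_vl[2] are each incremented three times - even
 * though, per limitation (c), those bits may belong to fewer packets.
 */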
5605
5606 /*
5607  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5608  * register. Does it represent a 'port inactive' error?
5609  */
5610 static inline int port_inactive_err(u64 posn)
5611 {
5612         return (posn >= SEES(TX_LINKDOWN) &&
5613                 posn <= SEES(TX_INCORRECT_LINK_STATE));
5614 }
5615
5616 /*
5617  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5618  * register. Does it represent a 'disallowed packet' error?
5619  */
5620 static inline int disallowed_pkt_err(int posn)
5621 {
5622         return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5623                 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5624 }
5625
5626 /*
5627  * Input value is a bit position of one of the SDMA engine disallowed
5628  * packet errors.  Return which engine.  Use of this must be guarded by
5629  * disallowed_pkt_err().
5630  */
5631 static inline int disallowed_pkt_engine(int posn)
5632 {
5633         return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5634 }
5635
5636 /*
5637  * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5638  * be done.
5639  */
5640 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5641 {
5642         struct sdma_vl_map *m;
5643         int vl;
5644
5645         /* range check */
5646         if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5647                 return -1;
5648
5649         rcu_read_lock();
5650         m = rcu_dereference(dd->sdma_map);
5651         vl = m->engine_to_vl[engine];
5652         rcu_read_unlock();
5653
5654         return vl;
5655 }
5656
5657 /*
5658  * Translate the send context (software index) into a VL.  Return -1 if the
5659  * translation cannot be done.
5660  */
5661 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5662 {
5663         struct send_context_info *sci;
5664         struct send_context *sc;
5665         int i;
5666
5667         sci = &dd->send_contexts[sw_index];
5668
5669         /* there is no information for user (PSM) and ack contexts */
5670         if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5671                 return -1;
5672
5673         sc = sci->sc;
5674         if (!sc)
5675                 return -1;
5676         if (dd->vld[15].sc == sc)
5677                 return 15;
5678         for (i = 0; i < num_vls; i++)
5679                 if (dd->vld[i].sc == sc)
5680                         return i;
5681
5682         return -1;
5683 }
5684
5685 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5686 {
5687         u64 reg_copy = reg, handled = 0;
5688         char buf[96];
5689         int i = 0;
5690
5691         if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5692                 start_freeze_handling(dd->pport, 0);
5693         else if (is_ax(dd) &&
5694                  (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5695                  (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5696                 start_freeze_handling(dd->pport, 0);
5697
5698         while (reg_copy) {
5699                 int posn = fls64(reg_copy);
5700                 /* fls64() returns a 1-based offset, we want it zero based */
5701                 int shift = posn - 1;
5702                 u64 mask = 1ULL << shift;
5703
5704                 if (port_inactive_err(shift)) {
5705                         count_port_inactive(dd);
5706                         handled |= mask;
5707                 } else if (disallowed_pkt_err(shift)) {
5708                         int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5709
5710                         handle_send_egress_err_info(dd, vl);
5711                         handled |= mask;
5712                 }
5713                 reg_copy &= ~mask;
5714         }
5715
5716         reg &= ~handled;
5717
5718         if (reg)
5719                 dd_dev_info(dd, "Egress Error: %s\n",
5720                             egress_err_status_string(buf, sizeof(buf), reg));
5721
5722         for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5723                 if (reg & (1ull << i))
5724                         incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5725         }
5726 }
5727
5728 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5729 {
5730         char buf[96];
5731         int i = 0;
5732
5733         dd_dev_info(dd, "Send Error: %s\n",
5734                     send_err_status_string(buf, sizeof(buf), reg));
5735
5736         for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5737                 if (reg & (1ull << i))
5738                         incr_cntr64(&dd->send_err_status_cnt[i]);
5739         }
5740 }
5741
5742 /*
5743  * The maximum number of times the error clear down will loop before
5744  * blocking a repeating error.  This value is arbitrary.
5745  */
5746 #define MAX_CLEAR_COUNT 20
5747
5748 /*
5749  * Clear and handle an error register.  All error interrupts are funneled
5750  * through here to have a central location to correctly handle single-
5751  * or multi-shot errors.
5752  *
5753  * For non per-context registers, call this routine with a context value
5754  * of 0 so the per-context offset is zero.
5755  *
5756  * If the handler loops too many times, assume that something is wrong
5757  * and can't be fixed, so mask the error bits.
5758  */
5759 static void interrupt_clear_down(struct hfi1_devdata *dd,
5760                                  u32 context,
5761                                  const struct err_reg_info *eri)
5762 {
5763         u64 reg;
5764         u32 count;
5765
5766         /* read in a loop until no more errors are seen */
5767         count = 0;
5768         while (1) {
5769                 reg = read_kctxt_csr(dd, context, eri->status);
5770                 if (reg == 0)
5771                         break;
5772                 write_kctxt_csr(dd, context, eri->clear, reg);
5773                 if (likely(eri->handler))
5774                         eri->handler(dd, context, reg);
5775                 count++;
5776                 if (count > MAX_CLEAR_COUNT) {
5777                         u64 mask;
5778
5779                         dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5780                                    eri->desc, reg);
5781                         /*
5782                          * Read-modify-write so any other masked bits
5783                          * remain masked.
5784                          */
5785                         mask = read_kctxt_csr(dd, context, eri->mask);
5786                         mask &= ~reg;
5787                         write_kctxt_csr(dd, context, eri->mask, mask);
5788                         break;
5789                 }
5790         }
5791 }
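
/*
 * Example of the clear-down behavior above: a stuck error bit reads back
 * as set on every pass, so after more than MAX_CLEAR_COUNT (20) passes
 * it is removed from the mask CSR via read-modify-write and can no
 * longer raise the interrupt.
 */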
5792
5793 /*
5794  * CCE block "misc" interrupt.  Source is < 16.
5795  */
5796 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5797 {
5798         const struct err_reg_info *eri = &misc_errs[source];
5799
5800         if (eri->handler) {
5801                 interrupt_clear_down(dd, 0, eri);
5802         } else {
5803                 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5804                            source);
5805         }
5806 }
5807
5808 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5809 {
5810         return flag_string(buf, buf_len, flags,
5811                            sc_err_status_flags,
5812                            ARRAY_SIZE(sc_err_status_flags));
5813 }
5814
5815 /*
5816  * Send context error interrupt.  Source (hw_context) is < 160.
5817  *
5818  * All send context errors cause the send context to halt.  The normal
5819  * clear-down mechanism cannot be used because we cannot clear the
5820  * error bits until several other long-running items are done first.
5821  * This is OK because with the context halted, nothing else is going
5822  * to happen on it anyway.
5823  */
5824 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5825                                 unsigned int hw_context)
5826 {
5827         struct send_context_info *sci;
5828         struct send_context *sc;
5829         char flags[96];
5830         u64 status;
5831         u32 sw_index;
5832         int i = 0;
5833
5834         sw_index = dd->hw_to_sw[hw_context];
5835         if (sw_index >= dd->num_send_contexts) {
5836                 dd_dev_err(dd,
5837                            "out of range sw index %u for send context %u\n",
5838                            sw_index, hw_context);
5839                 return;
5840         }
5841         sci = &dd->send_contexts[sw_index];
5842         sc = sci->sc;
5843         if (!sc) {
5844                 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5845                            sw_index, hw_context);
5846                 return;
5847         }
5848
5849         /* tell the software that a halt has begun */
5850         sc_stop(sc, SCF_HALTED);
5851
5852         status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5853
5854         dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5855                     send_context_err_status_string(flags, sizeof(flags),
5856                                                    status));
5857
5858         if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5859                 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5860
5861         /*
5862          * Automatically restart halted kernel contexts out of interrupt
5863          * context.  User contexts must ask the driver to restart the context.
5864          */
5865         if (sc->type != SC_USER)
5866                 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
5867
5868         /*
5869          * Update the counters for the corresponding status bits.
5870          * Note that these particular counters are aggregated over all
5871          * 160 contexts.
5872          */
5873         for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
5874                 if (status & (1ull << i))
5875                         incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
5876         }
5877 }
5878
5879 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
5880                                 unsigned int source, u64 status)
5881 {
5882         struct sdma_engine *sde;
5883         int i = 0;
5884
5885         sde = &dd->per_sdma[source];
5886 #ifdef CONFIG_SDMA_VERBOSITY
5887         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5888                    slashstrip(__FILE__), __LINE__, __func__);
5889         dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
5890                    sde->this_idx, source, (unsigned long long)status);
5891 #endif
5892         sde->err_cnt++;
5893         sdma_engine_error(sde, status);
5894
5895         /*
5896          * Update the counters for the corresponding status bits.
5897          * Note that these particular counters are aggregated over
5898          * all 16 DMA engines.
5899          */
5900         for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
5901                 if (status & (1ull << i))
5902                         incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
5903         }
5904 }
5905
5906 /*
5907  * CCE block SDMA error interrupt.  Source is < 16.
5908  */
5909 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
5910 {
5911 #ifdef CONFIG_SDMA_VERBOSITY
5912         struct sdma_engine *sde = &dd->per_sdma[source];
5913
5914         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
5915                    slashstrip(__FILE__), __LINE__, __func__);
5916         dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
5917                    source);
5918         sdma_dumpstate(sde);
5919 #endif
5920         interrupt_clear_down(dd, source, &sdma_eng_err);
5921 }
5922
5923 /*
5924  * CCE block "various" interrupt.  Source is < 8.
5925  */
5926 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
5927 {
5928         const struct err_reg_info *eri = &various_err[source];
5929
5930         /*
5931          * TCritInt cannot go through interrupt_clear_down()
5932          * because it is not a second tier interrupt. The handler
5933          * should be called directly.
5934          */
5935         if (source == TCRIT_INT_SOURCE)
5936                 handle_temp_err(dd);
5937         else if (eri->handler)
5938                 interrupt_clear_down(dd, 0, eri);
5939         else
5940                 dd_dev_info(dd,
5941                             "%s: Unimplemented/reserved interrupt %d\n",
5942                             __func__, source);
5943 }
5944
5945 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
5946 {
5947         /* src_ctx is always zero */
5948         struct hfi1_pportdata *ppd = dd->pport;
5949         unsigned long flags;
5950         u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
5951
5952         if (reg & QSFP_HFI0_MODPRST_N) {
5953                 if (!qsfp_mod_present(ppd)) {
5954                         dd_dev_info(dd, "%s: QSFP module removed\n",
5955                                     __func__);
5956
5957                         ppd->driver_link_ready = 0;
5958                         /*
5959                          * Cable removed, reset all our information about the
5960                          * cache and cable capabilities
5961                          */
5962
5963                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
5964                         /*
5965                          * We don't set cache_refresh_required here as we expect
5966                          * an interrupt when a cable is inserted
5967                          */
5968                         ppd->qsfp_info.cache_valid = 0;
5969                         ppd->qsfp_info.reset_needed = 0;
5970                         ppd->qsfp_info.limiting_active = 0;
5971                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
5972                                                flags);
5973                         /* Invert the ModPresent pin now to detect plug-in */
5974                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
5975                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
5976
5977                         if ((ppd->offline_disabled_reason >
5978                           HFI1_ODR_MASK(
5979                           OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
5980                           (ppd->offline_disabled_reason ==
5981                           HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
5982                                 ppd->offline_disabled_reason =
5983                                 HFI1_ODR_MASK(
5984                                 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
5985
5986                         if (ppd->host_link_state == HLS_DN_POLL) {
5987                                 /*
5988                                  * The link is still in POLL. This means
5989                                  * that the normal link down processing
5990                                  * will not happen. We have to do it here
5991                                  * before turning the DC off.
5992                                  */
5993                                 queue_work(ppd->hfi1_wq, &ppd->link_down_work);
5994                         }
5995                 } else {
5996                         dd_dev_info(dd, "%s: QSFP module inserted\n",
5997                                     __func__);
5998
5999                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6000                         ppd->qsfp_info.cache_valid = 0;
6001                         ppd->qsfp_info.cache_refresh_required = 1;
6002                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6003                                                flags);
6004
6005                         /*
6006                          * Stop inversion of ModPresent pin to detect
6007                          * removal of the cable
6008                          */
6009                         qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6010                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6011                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6012
6013                         ppd->offline_disabled_reason =
6014                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6015                 }
6016         }
6017
6018         if (reg & QSFP_HFI0_INT_N) {
6019                 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6020                             __func__);
6021                 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6022                 ppd->qsfp_info.check_interrupt_flags = 1;
6023                 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6024         }
6025
6026         /* Schedule the QSFP work only if there is a cable attached. */
6027         if (qsfp_mod_present(ppd))
6028                 queue_work(ppd->hfi1_wq, &ppd->qsfp_info.qsfp_work);
6029 }
6030
6031 static int request_host_lcb_access(struct hfi1_devdata *dd)
6032 {
6033         int ret;
6034
6035         ret = do_8051_command(dd, HCMD_MISC,
6036                               (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6037                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6038         if (ret != HCMD_SUCCESS) {
6039                 dd_dev_err(dd, "%s: command failed with error %d\n",
6040                            __func__, ret);
6041         }
6042         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6043 }
6044
6045 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6046 {
6047         int ret;
6048
6049         ret = do_8051_command(dd, HCMD_MISC,
6050                               (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6051                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6052         if (ret != HCMD_SUCCESS) {
6053                 dd_dev_err(dd, "%s: command failed with error %d\n",
6054                            __func__, ret);
6055         }
6056         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6057 }
6058
6059 /*
6060  * Set the LCB selector - allow host access.  The DCC selector always
6061  * points to the host.
6062  */
6063 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6064 {
6065         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6066                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6067                   DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6068 }
6069
6070 /*
6071  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6072  * points to the host.
6073  */
6074 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6075 {
6076         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6077                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6078 }
6079
6080 /*
6081  * Acquire LCB access from the 8051.  If the host already has access,
6082  * just increment a counter.  Otherwise, inform the 8051 that the
6083  * host is taking access.
6084  *
6085  * Returns:
6086  *      0 on success
6087  *      -EBUSY if the 8051 has control and cannot be disturbed
6088  *      -errno if unable to acquire access from the 8051
6089  */
6090 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6091 {
6092         struct hfi1_pportdata *ppd = dd->pport;
6093         int ret = 0;
6094
6095         /*
6096          * Use the host link state lock so the operation of this routine
6097          * { link state check, selector change, count increment } can occur
6098          * as a unit against a link state change.  Otherwise there is a
6099          * race between the state change and the count increment.
6100          */
6101         if (sleep_ok) {
6102                 mutex_lock(&ppd->hls_lock);
6103         } else {
6104                 while (!mutex_trylock(&ppd->hls_lock))
6105                         udelay(1);
6106         }
6107
6108         /* this access is valid only when the link is up */
6109         if (ppd->host_link_state & HLS_DOWN) {
6110                 dd_dev_info(dd, "%s: link state %s not up\n",
6111                             __func__, link_state_name(ppd->host_link_state));
6112                 ret = -EBUSY;
6113                 goto done;
6114         }
6115
6116         if (dd->lcb_access_count == 0) {
6117                 ret = request_host_lcb_access(dd);
6118                 if (ret) {
6119                         dd_dev_err(dd,
6120                                    "%s: unable to acquire LCB access, err %d\n",
6121                                    __func__, ret);
6122                         goto done;
6123                 }
6124                 set_host_lcb_access(dd);
6125         }
6126         dd->lcb_access_count++;
6127 done:
6128         mutex_unlock(&ppd->hls_lock);
6129         return ret;
6130 }
6131
6132 /*
6133  * Release LCB access by decrementing the use count.  If the count is moving
6134  * from 1 to 0, inform the 8051 that it has control back.
6135  *
6136  * Returns:
6137  *      0 on success
6138  *      -errno if unable to release access to the 8051
6139  */
6140 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6141 {
6142         int ret = 0;
6143
6144         /*
6145          * Use the host link state lock because the acquire needed it.
6146          * Here, we only need to keep { selector change, count decrement }
6147          * as a unit.
6148          */
6149         if (sleep_ok) {
6150                 mutex_lock(&dd->pport->hls_lock);
6151         } else {
6152                 while (!mutex_trylock(&dd->pport->hls_lock))
6153                         udelay(1);
6154         }
6155
6156         if (dd->lcb_access_count == 0) {
6157                 dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6158                            __func__);
6159                 goto done;
6160         }
6161
6162         if (dd->lcb_access_count == 1) {
6163                 set_8051_lcb_access(dd);
6164                 ret = request_8051_lcb_access(dd);
6165                 if (ret) {
6166                         dd_dev_err(dd,
6167                                    "%s: unable to release LCB access, err %d\n",
6168                                    __func__, ret);
6169                         /* restore host access if the grant didn't work */
6170                         set_host_lcb_access(dd);
6171                         goto done;
6172                 }
6173         }
6174         dd->lcb_access_count--;
6175 done:
6176         mutex_unlock(&dd->pport->hls_lock);
6177         return ret;
6178 }
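
/*
 * Sketch of the intended pairing of the two routines above; the CSR read
 * is only a stand-in for whatever LCB work the caller needs to do:
 *
 *	if (acquire_lcb_access(dd, 1) == 0) {
 *		reg = read_csr(dd, DC_LCB_ERR_EN);
 *		release_lcb_access(dd, 1);
 *	}
 */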
6179
6180 /*
6181  * Initialize LCB access variables and state.  Called during driver load,
6182  * after most of the initialization is finished.
6183  *
6184  * The DC default is LCB access on for the host.  The driver defaults to
6185  * leaving access to the 8051.  Assign access now - this constrains the call
6186  * to this routine to be after all LCB set-up is done.  In particular, after
6187  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6188  */
6189 static void init_lcb_access(struct hfi1_devdata *dd)
6190 {
6191         dd->lcb_access_count = 0;
6192 }
6193
6194 /*
6195  * Write a response back to a 8051 request.
6196  */
6197 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6198 {
6199         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6200                   DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6201                   (u64)return_code <<
6202                   DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6203                   (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6204 }
6205
6206 /*
6207  * Handle host requests from the 8051.
6208  */
6209 static void handle_8051_request(struct hfi1_pportdata *ppd)
6210 {
6211         struct hfi1_devdata *dd = ppd->dd;
6212         u64 reg;
6213         u16 data = 0;
6214         u8 type;
6215
6216         reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6217         if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6218                 return; /* no request */
6219
6220         /* zero out COMPLETED so the response is seen */
6221         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6222
6223         /* extract request details */
6224         type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6225                         & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6226         data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6227                         & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6228
6229         switch (type) {
6230         case HREQ_LOAD_CONFIG:
6231         case HREQ_SAVE_CONFIG:
6232         case HREQ_READ_CONFIG:
6233         case HREQ_SET_TX_EQ_ABS:
6234         case HREQ_SET_TX_EQ_REL:
6235         case HREQ_ENABLE:
6236                 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6237                             type);
6238                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6239                 break;
6240         case HREQ_CONFIG_DONE:
6241                 hreq_response(dd, HREQ_SUCCESS, 0);
6242                 break;
6243
6244         case HREQ_INTERFACE_TEST:
6245                 hreq_response(dd, HREQ_SUCCESS, data);
6246                 break;
6247         default:
6248                 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6249                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6250                 break;
6251         }
6252 }
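
/*
 * Of the request types above, only HREQ_CONFIG_DONE and
 * HREQ_INTERFACE_TEST are honored; the latter simply echoes the request
 * data back with HREQ_SUCCESS as a loopback check.  Everything else is
 * answered with HREQ_NOT_SUPPORTED.
 */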
6253
6254 static void write_global_credit(struct hfi1_devdata *dd,
6255                                 u8 vau, u16 total, u16 shared)
6256 {
6257         write_csr(dd, SEND_CM_GLOBAL_CREDIT,
6258                   ((u64)total <<
6259                    SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
6260                   ((u64)shared <<
6261                    SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
6262                   ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
6263 }
6264
6265 /*
6266  * Set up initial VL15 credits of the remote.  Assumes the rest of
6267  * the CM credit registers are zero from a previous global or credit reset.
6268  */
6269 void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
6270 {
6271         /* leave shared count at zero for both global and VL15 */
6272         write_global_credit(dd, vau, vl15buf, 0);
6273
6274         /* We may need some credits for another VL when sending packets
6275          * with the snoop interface. Dividing it down the middle for VL15
6276          * and VL0 should suffice.
6277          */
6278         if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) {
6279                 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1)
6280                     << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6281                 write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1)
6282                     << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT);
6283         } else {
6284                 write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6285                         << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6286         }
6287 }
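/*
 * Worked example with illustrative values: vau = 1 means AU = 16 bytes
 * (see vau_to_au() below).  With vl15buf = 0x40, the normal path gives
 * all 0x40 dedicated credits to VL15; in snoop mode the two writes
 * above split them 0x20/0x20 between VL15 and VL0.
 */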
6288
6289 /*
6290  * Zero all credit details from the previous connection and
6291  * reset the CM manager's internal counters.
6292  */
6293 void reset_link_credits(struct hfi1_devdata *dd)
6294 {
6295         int i;
6296
6297         /* remove all previous VL credit limits */
6298         for (i = 0; i < TXE_NUM_DATA_VL; i++)
6299                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6300         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6301         write_global_credit(dd, 0, 0, 0);
6302         /* reset the CM block */
6303         pio_send_control(dd, PSC_CM_RESET);
6304 }
6305
6306 /* convert a vCU to a CU */
6307 static u32 vcu_to_cu(u8 vcu)
6308 {
6309         return 1 << vcu;
6310 }
6311
6312 /* convert a CU to a vCU */
6313 static u8 cu_to_vcu(u32 cu)
6314 {
6315         return ilog2(cu);
6316 }
6317
6318 /* convert a vAU to an AU */
6319 static u32 vau_to_au(u8 vau)
6320 {
6321         return 8 * (1 << vau);
6322 }
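/*
 * Illustrative conversions: vcu_to_cu(3) == 8 CU, cu_to_vcu(8) == 3,
 * and vau_to_au(2) == 8 * 4 == 32 bytes per allocation unit.
 */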
6323
6324 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6325 {
6326         ppd->sm_trap_qp = 0x0;
6327         ppd->sa_qp = 0x1;
6328 }
6329
6330 /*
6331  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6332  */
6333 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6334 {
6335         u64 reg;
6336
6337         /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6338         write_csr(dd, DC_LCB_CFG_RUN, 0);
6339         /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6340         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6341                   1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6342         /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6343         dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6344         reg = read_csr(dd, DCC_CFG_RESET);
6345         write_csr(dd, DCC_CFG_RESET, reg |
6346                   (1ull << DCC_CFG_RESET_RESET_LCB_SHIFT) |
6347                   (1ull << DCC_CFG_RESET_RESET_RX_FPE_SHIFT));
6348         (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6349         if (!abort) {
6350                 udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6351                 write_csr(dd, DCC_CFG_RESET, reg);
6352                 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6353         }
6354 }
6355
6356 /*
6357  * This routine should be called after the link has been transitioned to
6358  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6359  * reset).
6360  *
6361  * The expectation is that the caller of this routine would have taken
6362  * care of properly transitioning the link into the correct state.
6363  */
6364 static void dc_shutdown(struct hfi1_devdata *dd)
6365 {
6366         unsigned long flags;
6367
6368         spin_lock_irqsave(&dd->dc8051_lock, flags);
6369         if (dd->dc_shutdown) {
6370                 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6371                 return;
6372         }
6373         dd->dc_shutdown = 1;
6374         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6375         /* Shutdown the LCB */
6376         lcb_shutdown(dd, 1);
6377         /*
6378          * Going to OFFLINE would have caused the 8051 to put the
6379          * SerDes into reset already. Just need to shut down the 8051
6380          * itself.
6381          */
6382         write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6383 }
6384
6385 /*
6386  * Calling this after the DC has been brought out of reset should not
6387  * do any damage.
6388  */
6389 static void dc_start(struct hfi1_devdata *dd)
6390 {
6391         unsigned long flags;
6392         int ret;
6393
6394         spin_lock_irqsave(&dd->dc8051_lock, flags);
6395         if (!dd->dc_shutdown)
6396                 goto done;
6397         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6398         /* Take the 8051 out of reset */
6399         write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6400         /* Wait until 8051 is ready */
6401         ret = wait_fm_ready(dd, TIMEOUT_8051_START);
6402         if (ret) {
6403                 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6404                            __func__);
6405         }
6406         /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6407         write_csr(dd, DCC_CFG_RESET, 0x10);
6408         /* lcb_shutdown() with abort=1 does not restore these */
6409         write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6410         spin_lock_irqsave(&dd->dc8051_lock, flags);
6411         dd->dc_shutdown = 0;
6412 done:
6413         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
6414 }
6415
6416 /*
6417  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6418  */
6419 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6420 {
6421         u64 rx_radr, tx_radr;
6422         u32 version;
6423
6424         if (dd->icode != ICODE_FPGA_EMULATION)
6425                 return;
6426
6427         /*
6428          * These LCB defaults on emulator _s are good, nothing to do here:
6429          *      LCB_CFG_TX_FIFOS_RADR
6430          *      LCB_CFG_RX_FIFOS_RADR
6431          *      LCB_CFG_LN_DCLK
6432          *      LCB_CFG_IGNORE_LOST_RCLK
6433          */
6434         if (is_emulator_s(dd))
6435                 return;
6436         /* else this is _p */
6437
6438         version = emulator_rev(dd);
6439         if (!is_ax(dd))
6440                 version = 0x2d; /* all B0 use 0x2d or higher settings */
6441
6442         if (version <= 0x12) {
6443                 /* release 0x12 and below */
6444
6445                 /*
6446                  * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6447                  * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6448                  * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6449                  */
6450                 rx_radr =
6451                       0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6452                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6453                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6454                 /*
6455                  * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6456                  * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6457                  */
6458                 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6459         } else if (version <= 0x18) {
6460                 /* release 0x13 up to 0x18 */
6461                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6462                 rx_radr =
6463                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6464                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6465                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6466                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6467         } else if (version == 0x19) {
6468                 /* release 0x19 */
6469                 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6470                 rx_radr =
6471                       0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6472                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6473                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6474                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6475         } else if (version == 0x1a) {
6476                 /* release 0x1a */
6477                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6478                 rx_radr =
6479                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6480                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6481                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6482                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6483                 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6484         } else {
6485                 /* release 0x1b and higher */
6486                 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6487                 rx_radr =
6488                       0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6489                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6490                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6491                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6492         }
6493
6494         write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6495         /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6496         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6497                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6498         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6499 }
6500
6501 /*
6502  * Handle a SMA idle message
6503  *
6504  * This is a work-queue function outside of the interrupt.
6505  */
6506 void handle_sma_message(struct work_struct *work)
6507 {
6508         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6509                                                         sma_message_work);
6510         struct hfi1_devdata *dd = ppd->dd;
6511         u64 msg;
6512         int ret;
6513
6514         /*
6515          * msg is bytes 1-4 of the 40-bit idle message - the command code
6516          * is stripped off
6517          */
6518         ret = read_idle_sma(dd, &msg);
6519         if (ret)
6520                 return;
6521         dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6522         /*
6523          * React to the SMA message.  Byte[1] (0 for us) is the command.
6524          */
6525         switch (msg & 0xff) {
6526         case SMA_IDLE_ARM:
6527                 /*
6528                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6529                  * State Transitions
6530                  *
6531                  * Only expected in INIT or ARMED, discard otherwise.
6532                  */
6533                 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6534                         ppd->neighbor_normal = 1;
6535                 break;
6536         case SMA_IDLE_ACTIVE:
6537                 /*
6538                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6539                  * State Transitions
6540                  *
6541                  * Can activate the node.  Discard otherwise.
6542                  */
6543                 if (ppd->host_link_state == HLS_UP_ARMED &&
6544                     ppd->is_active_optimize_enabled) {
6545                         ppd->neighbor_normal = 1;
6546                         ret = set_link_state(ppd, HLS_UP_ACTIVE);
6547                         if (ret)
6548                                 dd_dev_err(
6549                                         dd,
6550                                         "%s: received Active SMA idle message, couldn't set link to Active\n",
6551                                         __func__);
6552                 }
6553                 break;
6554         default:
6555                 dd_dev_err(dd,
6556                            "%s: received unexpected SMA idle message 0x%llx\n",
6557                            __func__, msg);
6558                 break;
6559         }
6560 }
6561
6562 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6563 {
6564         u64 rcvctrl;
6565         unsigned long flags;
6566
6567         spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6568         rcvctrl = read_csr(dd, RCV_CTRL);
6569         rcvctrl |= add;
6570         rcvctrl &= ~clear;
6571         write_csr(dd, RCV_CTRL, rcvctrl);
6572         spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6573 }
6574
6575 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6576 {
6577         adjust_rcvctrl(dd, add, 0);
6578 }
6579
6580 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6581 {
6582         adjust_rcvctrl(dd, 0, clear);
6583 }
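/*
 * Typical use, as in rxe_freeze()/rxe_kernel_unfreeze() below:
 * clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK) disables the port
 * and add_rcvctrl() with the same mask re-enables it, each as a locked
 * read-modify-write of RCV_CTRL.
 */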
6584
6585 /*
6586  * Called from all interrupt handlers to start handling an SPC freeze.
6587  */
6588 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6589 {
6590         struct hfi1_devdata *dd = ppd->dd;
6591         struct send_context *sc;
6592         int i;
6593
6594         if (flags & FREEZE_SELF)
6595                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6596
6597         /* enter frozen mode */
6598         dd->flags |= HFI1_FROZEN;
6599
6600         /* notify all SDMA engines that they are going into a freeze */
6601         sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6602
6603         /* do halt pre-handling on all enabled send contexts */
6604         for (i = 0; i < dd->num_send_contexts; i++) {
6605                 sc = dd->send_contexts[i].sc;
6606                 if (sc && (sc->flags & SCF_ENABLED))
6607                         sc_stop(sc, SCF_FROZEN | SCF_HALTED);
6608         }
6609
6610         /* Send contexts are frozen. Notify user space */
6611         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6612
6613         if (flags & FREEZE_ABORT) {
6614                 dd_dev_err(dd,
6615                            "Aborted freeze recovery. Please REBOOT system\n");
6616                 return;
6617         }
6618         /* queue non-interrupt handler */
6619         queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6620 }
6621
6622 /*
6623  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6624  * depending on the "freeze" parameter.
6625  *
6626  * No need to return an error if it times out, our only option
6627  * is to proceed anyway.
6628  */
6629 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6630 {
6631         unsigned long timeout;
6632         u64 reg;
6633
6634         timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6635         while (1) {
6636                 reg = read_csr(dd, CCE_STATUS);
6637                 if (freeze) {
6638                         /* waiting until all indicators are set */
6639                         if ((reg & ALL_FROZE) == ALL_FROZE)
6640                                 return; /* all done */
6641                 } else {
6642                         /* waiting until all indicators are clear */
6643                         if ((reg & ALL_FROZE) == 0)
6644                                 return; /* all done */
6645                 }
6646
6647                 if (time_after(jiffies, timeout)) {
6648                         dd_dev_err(dd,
6649                                    "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6650                                    freeze ? "" : "un", reg & ALL_FROZE,
6651                                    freeze ? ALL_FROZE : 0ull);
6652                         return;
6653                 }
6654                 usleep_range(80, 120);
6655         }
6656 }
6657
6658 /*
6659  * Do all freeze handling for the RXE block.
6660  */
6661 static void rxe_freeze(struct hfi1_devdata *dd)
6662 {
6663         int i;
6664
6665         /* disable port */
6666         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6667
6668         /* disable all receive contexts */
6669         for (i = 0; i < dd->num_rcv_contexts; i++)
6670                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, i);
6671 }
6672
6673 /*
6674  * Unfreeze handling for the RXE block - kernel contexts only.
6675  * This will also enable the port.  User contexts will do unfreeze
6676  * handling on a per-context basis as they call into the driver.
6677  *
6678  */
6679 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6680 {
6681         u32 rcvmask;
6682         int i;
6683
6684         /* enable all kernel contexts */
6685         for (i = 0; i < dd->n_krcv_queues; i++) {
6686                 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6687                 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6688                 rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ?
6689                         HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6690                 hfi1_rcvctrl(dd, rcvmask, i);
6691         }
6692
6693         /* enable port */
6694         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6695 }
6696
6697 /*
6698  * Non-interrupt SPC freeze handling.
6699  *
6700  * This is a work-queue function outside of the triggering interrupt.
6701  */
6702 void handle_freeze(struct work_struct *work)
6703 {
6704         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6705                                                                 freeze_work);
6706         struct hfi1_devdata *dd = ppd->dd;
6707
6708         /* wait for freeze indicators on all affected blocks */
6709         wait_for_freeze_status(dd, 1);
6710
6711         /* SPC is now frozen */
6712
6713         /* do send PIO freeze steps */
6714         pio_freeze(dd);
6715
6716         /* do send DMA freeze steps */
6717         sdma_freeze(dd);
6718
6719         /* do send egress freeze steps - nothing to do */
6720
6721         /* do receive freeze steps */
6722         rxe_freeze(dd);
6723
6724         /*
6725          * Unfreeze the hardware - clear the freeze, wait for each
6726          * block's frozen bit to clear, then clear the frozen flag.
6727          */
6728         write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6729         wait_for_freeze_status(dd, 0);
6730
6731         if (is_ax(dd)) {
6732                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6733                 wait_for_freeze_status(dd, 1);
6734                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6735                 wait_for_freeze_status(dd, 0);
6736         }
6737
6738         /* do send PIO unfreeze steps for kernel contexts */
6739         pio_kernel_unfreeze(dd);
6740
6741         /* do send DMA unfreeze steps */
6742         sdma_unfreeze(dd);
6743
6744         /* do send egress unfreeze steps - nothing to do */
6745
6746         /* do receive unfreeze steps for kernel contexts */
6747         rxe_kernel_unfreeze(dd);
6748
6749         /*
6750          * The unfreeze procedure touches global device registers when
6751          * it disables and re-enables RXE. Mark the device unfrozen
6752          * after all that is done so other parts of the driver waiting
6753          * for the device to unfreeze don't do things out of order.
6754          *
6755          * The above implies that the meaning of HFI1_FROZEN flag is
6756          * "Device has gone into freeze mode and freeze mode handling
6757          * is still in progress."
6758          *
6759          * The flag will be removed when freeze mode processing has
6760          * completed.
6761          */
6762         dd->flags &= ~HFI1_FROZEN;
6763         wake_up(&dd->event_queue);
6764
6765         /* no longer frozen */
6766 }
6767
6768 /*
6769  * Handle a link up interrupt from the 8051.
6770  *
6771  * This is a work-queue function outside of the interrupt.
6772  */
6773 void handle_link_up(struct work_struct *work)
6774 {
6775         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6776                                                   link_up_work);
6777         set_link_state(ppd, HLS_UP_INIT);
6778
6779         /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6780         read_ltp_rtt(ppd->dd);
6781         /*
6782          * OPA specifies that certain counters are cleared on a transition
6783          * to link up, so do that.
6784          */
6785         clear_linkup_counters(ppd->dd);
6786         /*
6787          * And (re)set link up default values.
6788          */
6789         set_linkup_defaults(ppd);
6790
6791         /* enforce link speed enabled */
6792         if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
6793                 /* oops - current speed is not enabled, bounce */
6794                 dd_dev_err(ppd->dd,
6795                            "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
6796                            ppd->link_speed_active, ppd->link_speed_enabled);
6797                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
6798                                      OPA_LINKDOWN_REASON_SPEED_POLICY);
6799                 set_link_state(ppd, HLS_DN_OFFLINE);
6800                 tune_serdes(ppd);
6801                 start_link(ppd);
6802         }
6803 }
6804
6805 /*
6806  * Several pieces of LNI information were cached for SMA in ppd.
6807  * Reset these on link down
6808  */
6809 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
6810 {
6811         ppd->neighbor_guid = 0;
6812         ppd->neighbor_port_number = 0;
6813         ppd->neighbor_type = 0;
6814         ppd->neighbor_fm_security = 0;
6815 }
6816
6817 static const char * const link_down_reason_strs[] = {
6818         [OPA_LINKDOWN_REASON_NONE] = "None",
6819         [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
6820         [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
6821         [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
6822         [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
6823         [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
6824         [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
6825         [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
6826         [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
6827         [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
6828         [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
6829         [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
6830         [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
6831         [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt VL15",
6832         [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
6833         [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
6834         [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
6835         [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
6836         [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
6837         [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
6838         [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
6839         [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
6840         [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
6841         [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
6842         [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
6843         [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
6844         [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
6845         [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
6846         [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
6847         [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
6848         [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
6849         [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
6850         [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
6851                                         "Excessive buffer overrun",
6852         [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
6853         [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
6854         [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
6855         [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
6856         [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
6857         [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
6858         [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
6859         [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
6860                                         "Local media not installed",
6861         [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
6862         [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
6863         [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
6864                                         "End to end not installed",
6865         [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
6866         [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
6867         [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
6868         [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
6869         [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
6870         [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
6871 };
6872
6873 /* return the neighbor link down reason string */
6874 static const char *link_down_reason_str(u8 reason)
6875 {
6876         const char *str = NULL;
6877
6878         if (reason < ARRAY_SIZE(link_down_reason_strs))
6879                 str = link_down_reason_strs[reason];
6880         if (!str)
6881                 str = "(invalid)";
6882
6883         return str;
6884 }
6885
6886 /*
6887  * Handle a link down interrupt from the 8051.
6888  *
6889  * This is a work-queue function outside of the interrupt.
6890  */
6891 void handle_link_down(struct work_struct *work)
6892 {
6893         u8 lcl_reason, neigh_reason = 0;
6894         u8 link_down_reason;
6895         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6896                                                   link_down_work);
6897         int was_up;
6898         static const char ldr_str[] = "Link down reason: ";
6899
6900         if ((ppd->host_link_state &
6901              (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
6902              ppd->port_type == PORT_TYPE_FIXED)
6903                 ppd->offline_disabled_reason =
6904                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
6905
6906         /* Go offline first, then deal with reading/writing through 8051 */
6907         was_up = !!(ppd->host_link_state & HLS_UP);
6908         set_link_state(ppd, HLS_DN_OFFLINE);
6909
6910         if (was_up) {
6911                 lcl_reason = 0;
6912                 /* link down reason is only valid if the link was up */
6913                 read_link_down_reason(ppd->dd, &link_down_reason);
6914                 switch (link_down_reason) {
6915                 case LDR_LINK_TRANSFER_ACTIVE_LOW:
6916                         /* the link went down, no idle message reason */
6917                         dd_dev_info(ppd->dd, "%sUnexpected link down\n",
6918                                     ldr_str);
6919                         break;
6920                 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
6921                         /*
6922                          * The neighbor reason is only valid if an idle message
6923                          * was received for it.
6924                          */
6925                         read_planned_down_reason_code(ppd->dd, &neigh_reason);
6926                         dd_dev_info(ppd->dd,
6927                                     "%sNeighbor link down message %d, %s\n",
6928                                     ldr_str, neigh_reason,
6929                                     link_down_reason_str(neigh_reason));
6930                         break;
6931                 case LDR_RECEIVED_HOST_OFFLINE_REQ:
6932                         dd_dev_info(ppd->dd,
6933                                     "%sHost requested link to go offline\n",
6934                                     ldr_str);
6935                         break;
6936                 default:
6937                         dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
6938                                     ldr_str, link_down_reason);
6939                         break;
6940                 }
6941
6942                 /*
6943                  * If no reason, assume peer-initiated but missed
6944                  * LinkGoingDown idle flits.
6945                  */
6946                 if (neigh_reason == 0)
6947                         lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
6948         } else {
6949                 /* went down while polling or going up */
6950                 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
6951         }
6952
6953         set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
6954
6955         /* inform the SMA when the link transitions from up to down */
6956         if (was_up && ppd->local_link_down_reason.sma == 0 &&
6957             ppd->neigh_link_down_reason.sma == 0) {
6958                 ppd->local_link_down_reason.sma =
6959                                         ppd->local_link_down_reason.latest;
6960                 ppd->neigh_link_down_reason.sma =
6961                                         ppd->neigh_link_down_reason.latest;
6962         }
6963
6964         reset_neighbor_info(ppd);
6965
6966         /* disable the port */
6967         clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6968
6969         /*
6970          * If there is no cable attached, turn the DC off. Otherwise,
6971          * start the link bring up.
6972          */
6973         if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd)) {
6974                 dc_shutdown(ppd->dd);
6975         } else {
6976                 tune_serdes(ppd);
6977                 start_link(ppd);
6978         }
6979 }
6980
6981 void handle_link_bounce(struct work_struct *work)
6982 {
6983         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6984                                                         link_bounce_work);
6985
6986         /*
6987          * Only do something if the link is currently up.
6988          */
6989         if (ppd->host_link_state & HLS_UP) {
6990                 set_link_state(ppd, HLS_DN_OFFLINE);
6991                 tune_serdes(ppd);
6992                 start_link(ppd);
6993         } else {
6994                 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
6995                             __func__, link_state_name(ppd->host_link_state));
6996         }
6997 }
6998
6999 /*
7000  * Mask conversion: Capability exchange to Port LTP.  The capability
7001  * exchange has an implicit 16b CRC that is mandatory.
7002  */
7003 static int cap_to_port_ltp(int cap)
7004 {
7005         int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7006
7007         if (cap & CAP_CRC_14B)
7008                 port_ltp |= PORT_LTP_CRC_MODE_14;
7009         if (cap & CAP_CRC_48B)
7010                 port_ltp |= PORT_LTP_CRC_MODE_48;
7011         if (cap & CAP_CRC_12B_16B_PER_LANE)
7012                 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7013
7014         return port_ltp;
7015 }
7016
7017 /*
7018  * Convert an OPA Port LTP mask to capability mask
7019  */
7020 int port_ltp_to_cap(int port_ltp)
7021 {
7022         int cap_mask = 0;
7023
7024         if (port_ltp & PORT_LTP_CRC_MODE_14)
7025                 cap_mask |= CAP_CRC_14B;
7026         if (port_ltp & PORT_LTP_CRC_MODE_48)
7027                 cap_mask |= CAP_CRC_48B;
7028         if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7029                 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7030
7031         return cap_mask;
7032 }
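/*
 * Note that cap_to_port_ltp() and port_ltp_to_cap() are inverses except
 * for the mandatory 16b mode, which cap_to_port_ltp() always sets and
 * which has no capability bit to map back to.
 */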
7033
7034 /*
7035  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7036  */
7037 static int lcb_to_port_ltp(int lcb_crc)
7038 {
7039         int port_ltp = 0;
7040
7041         if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7042                 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7043         else if (lcb_crc == LCB_CRC_48B)
7044                 port_ltp = PORT_LTP_CRC_MODE_48;
7045         else if (lcb_crc == LCB_CRC_14B)
7046                 port_ltp = PORT_LTP_CRC_MODE_14;
7047         else
7048                 port_ltp = PORT_LTP_CRC_MODE_16;
7049
7050         return port_ltp;
7051 }
7052
7053 /*
7054  * Our neighbor has indicated that we are allowed to act as a fabric
7055  * manager, so place the full management partition key in the second
7056  * (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
7057  * that we should already have the limited management partition key in
7058  * array element 1, and also that the port is not yet up when
7059  * add_full_mgmt_pkey() is invoked.
7060  */
7061 static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7062 {
7063         struct hfi1_devdata *dd = ppd->dd;
7064
7065         /* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
7066         if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
7067                 dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
7068                             __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
7069         ppd->pkeys[2] = FULL_MGMT_P_KEY;
7070         (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7071         hfi1_event_pkey_change(ppd->dd, ppd->port);
7072 }
7073
7074 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7075 {
7076         if (ppd->pkeys[2] != 0) {
7077                 ppd->pkeys[2] = 0;
7078                 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7079                 hfi1_event_pkey_change(ppd->dd, ppd->port);
7080         }
7081 }
7082
7083 /*
7084  * Convert the given link width to the OPA link width bitmask.
7085  */
7086 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7087 {
7088         switch (width) {
7089         case 0:
7090                 /*
7091                  * Simulator and quick linkup do not set the width.
7092                  * Just set it to 4x without complaint.
7093                  */
7094                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7095                         return OPA_LINK_WIDTH_4X;
7096                 return 0; /* no lanes up */
7097         case 1: return OPA_LINK_WIDTH_1X;
7098         case 2: return OPA_LINK_WIDTH_2X;
7099         case 3: return OPA_LINK_WIDTH_3X;
7100         default:
7101                 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7102                             __func__, width);
7103                 /* fall through */
7104         case 4: return OPA_LINK_WIDTH_4X;
7105         }
7106 }
7107
7108 /*
7109  * Do a population count on the bottom nibble.
7110  */
7111 static const u8 bit_counts[16] = {
7112         0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7113 };
7114
7115 static inline u8 nibble_to_count(u8 nibble)
7116 {
7117         return bit_counts[nibble & 0xf];
7118 }
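/*
 * Example: an enable_lane mask of 0xb (binary 1011) counts as 3 active
 * lanes; 0xf is the full 4.
 */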
7119
7120 /*
7121  * Read the active lane information from the 8051 registers and return
7122  * their widths.
7123  *
7124  * Active lane information is found in these 8051 registers:
7125  *      enable_lane_tx
7126  *      enable_lane_rx
7127  */
7128 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7129                             u16 *rx_width)
7130 {
7131         u16 tx, rx;
7132         u8 enable_lane_rx;
7133         u8 enable_lane_tx;
7134         u8 tx_polarity_inversion;
7135         u8 rx_polarity_inversion;
7136         u8 max_rate;
7137
7138         /* read the active lanes */
7139         read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7140                          &rx_polarity_inversion, &max_rate);
7141         read_local_lni(dd, &enable_lane_rx);
7142
7143         /* convert to counts */
7144         tx = nibble_to_count(enable_lane_tx);
7145         rx = nibble_to_count(enable_lane_rx);
7146
7147         /*
7148          * Set link_speed_active here, overriding what was set in
7149          * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7150          * set the max_rate field in handle_verify_cap until v0.19.
7151          */
7152         if ((dd->icode == ICODE_RTL_SILICON) &&
7153             (dd->dc8051_ver < dc8051_ver(0, 19))) {
7154                 /* max_rate: 0 = 12.5G, 1 = 25G */
7155                 switch (max_rate) {
7156                 case 0:
7157                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7158                         break;
7159                 default:
7160                         dd_dev_err(dd,
7161                                    "%s: unexpected max rate %d, using 25Gb\n",
7162                                    __func__, (int)max_rate);
7163                         /* fall through */
7164                 case 1:
7165                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7166                         break;
7167                 }
7168         }
7169
7170         dd_dev_info(dd,
7171                     "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7172                     enable_lane_tx, tx, enable_lane_rx, rx);
7173         *tx_width = link_width_to_bits(dd, tx);
7174         *rx_width = link_width_to_bits(dd, rx);
7175 }
7176
7177 /*
7178  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7179  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7180  * after link up.  I.e. look elsewhere for downgrade information.
7181  *
7182  * Bits are:
7183  *      + bits [7:4] contain the number of active transmitters
7184  *      + bits [3:0] contain the number of active receivers
7185  * These are numbers 1 through 4 and can be different values if the
7186  * link is asymmetric.
7187  *
7188  * verify_cap_local_fm_link_width[0] retains its original value.
7189  */
7190 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7191                               u16 *rx_width)
7192 {
7193         u16 widths, tx, rx;
7194         u8 misc_bits, local_flags;
7195         u16 active_tx, active_rx;
7196
7197         read_vc_local_link_width(dd, &misc_bits, &local_flags, &widths);
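        /*
         * widths holds bytes [1:0] of the field, so the byte 1 nibbles
         * described above land at bits [15:12] (tx) and [11:8] (rx).
         */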
7198         tx = widths >> 12;
7199         rx = (widths >> 8) & 0xf;
7200
7201         *tx_width = link_width_to_bits(dd, tx);
7202         *rx_width = link_width_to_bits(dd, rx);
7203
7204         /* print the active widths */
7205         get_link_widths(dd, &active_tx, &active_rx);
7206 }
7207
7208 /*
7209  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7210  * hardware information when the link first comes up.
7211  *
7212  * The link width is not available until after VerifyCap.AllFramesReceived
7213  * (the trigger for handle_verify_cap), so this is outside that routine
7214  * and should be called when the 8051 signals linkup.
7215  */
7216 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7217 {
7218         u16 tx_width, rx_width;
7219
7220         /* get end-of-LNI link widths */
7221         get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7222
7223         /* use tx_width as the link is supposed to be symmetric on link up */
7224         ppd->link_width_active = tx_width;
7225         /* link width downgrade active (LWD.A) starts out matching LW.A */
7226         ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7227         ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7228         /* per OPA spec, on link up LWD.E resets to LWD.S */
7229         ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7230         /* cache the active egress rate (units [10^6 bits/sec]) */
7231         ppd->current_egress_rate = active_egress_rate(ppd);
7232 }
7233
7234 /*
7235  * Handle a verify capabilities interrupt from the 8051.
7236  *
7237  * This is a work-queue function outside of the interrupt.
7238  */
7239 void handle_verify_cap(struct work_struct *work)
7240 {
7241         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7242                                                                 link_vc_work);
7243         struct hfi1_devdata *dd = ppd->dd;
7244         u64 reg;
7245         u8 power_management;
7246         u8 continuous;
7247         u8 vcu;
7248         u8 vau;
7249         u8 z;
7250         u16 vl15buf;
7251         u16 link_widths;
7252         u16 crc_mask;
7253         u16 crc_val;
7254         u16 device_id;
7255         u16 active_tx, active_rx;
7256         u8 partner_supported_crc;
7257         u8 remote_tx_rate;
7258         u8 device_rev;
7259
7260         set_link_state(ppd, HLS_VERIFY_CAP);
7261
7262         lcb_shutdown(dd, 0);
7263         adjust_lcb_for_fpga_serdes(dd);
7264
7265         /*
7266          * These are now valid:
7267          *      remote VerifyCap fields in the general LNI config
7268          *      CSR DC8051_STS_REMOTE_GUID
7269          *      CSR DC8051_STS_REMOTE_NODE_TYPE
7270          *      CSR DC8051_STS_REMOTE_FM_SECURITY
7271          *      CSR DC8051_STS_REMOTE_PORT_NO
7272          */
7273
7274         read_vc_remote_phy(dd, &power_management, &continuous);
7275         read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7276                               &partner_supported_crc);
7277         read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7278         read_remote_device_id(dd, &device_id, &device_rev);
7279         /*
7280          * And the 'MgmtAllowed' information, which is exchanged during
7281          * LNI, is also available at this point.
7282          */
7283         read_mgmt_allowed(dd, &ppd->mgmt_allowed);
7284         /* print the active widths */
7285         get_link_widths(dd, &active_tx, &active_rx);
7286         dd_dev_info(dd,
7287                     "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7288                     (int)power_management, (int)continuous);
7289         dd_dev_info(dd,
7290                     "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7291                     (int)vau, (int)z, (int)vcu, (int)vl15buf,
7292                     (int)partner_supported_crc);
7293         dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7294                     (u32)remote_tx_rate, (u32)link_widths);
7295         dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7296                     (u32)device_id, (u32)device_rev);
7297         /*
7298          * The peer vAU value just read is the peer receiver value.  HFI does
7299          * not support a transmit vAU of 0 (AU == 8).  We advertised that
7300          * with Z=1 in the fabric capabilities sent to the peer.  The peer
7301          * will see our Z=1, and, if it advertised a vAU of 0, will move its
7302          * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7303          * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7304          * subject to the Z value exception.
7305          */
7306         if (vau == 0)
7307                 vau = 1;
7308         set_up_vl15(dd, vau, vl15buf);
7309
7310         /* set up the LCB CRC mode */
7311         crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7312
7313         /* order is important: use the lowest bit in common */
7314         if (crc_mask & CAP_CRC_14B)
7315                 crc_val = LCB_CRC_14B;
7316         else if (crc_mask & CAP_CRC_48B)
7317                 crc_val = LCB_CRC_48B;
7318         else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7319                 crc_val = LCB_CRC_12B_16B_PER_LANE;
7320         else
7321                 crc_val = LCB_CRC_16B;
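        /*
         * Example: if we enabled 14B and 48B but the peer supports only
         * 48B, crc_mask selects 48B; if both sides have 14B and 48B in
         * common, 14B wins per the ordering above.
         */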
7322
7323         dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7324         write_csr(dd, DC_LCB_CFG_CRC_MODE,
7325                   (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7326
7327         /* set (14b only) or clear sideband credit */
7328         reg = read_csr(dd, SEND_CM_CTRL);
7329         if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7330                 write_csr(dd, SEND_CM_CTRL,
7331                           reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7332         } else {
7333                 write_csr(dd, SEND_CM_CTRL,
7334                           reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7335         }
7336
7337         ppd->link_speed_active = 0;     /* invalid value */
7338         if (dd->dc8051_ver < dc8051_ver(0, 20)) {
7339                 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7340                 switch (remote_tx_rate) {
7341                 case 0:
7342                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7343                         break;
7344                 case 1:
7345                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7346                         break;
7347                 }
7348         } else {
7349                 /* actual rate is highest bit of the ANDed rates */
7350                 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7351
7352                 if (rate & 2)
7353                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7354                 else if (rate & 1)
7355                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7356         }
7357         if (ppd->link_speed_active == 0) {
7358                 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7359                            __func__, (int)remote_tx_rate);
7360                 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7361         }
7362
7363         /*
7364          * Cache the values of the supported, enabled, and active
7365          * LTP CRC modes to return in 'portinfo' queries. But the bit
7366          * flags that are returned in the portinfo query differ from
7367          * what's in the link_crc_mask, crc_sizes, and crc_val
7368          * variables. Convert these here.
7369          */
7370         ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7371                 /* supported crc modes */
7372         ppd->port_ltp_crc_mode |=
7373                 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7374                 /* enabled crc modes */
7375         ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7376                 /* active crc mode */
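        /*
         * The result is three 4-bit fields: supported modes in bits
         * [11:8], enabled modes in bits [7:4], and the single active
         * mode in bits [3:0].
         */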
7377
7378         /* set up the remote credit return table */
7379         assign_remote_cm_au_table(dd, vcu);
7380
7381         /*
7382          * The LCB is reset on entry to handle_verify_cap(), so this must
7383          * be applied on every link up.
7384          *
7385          * Adjust LCB error kill enable to kill the link if
7386          * these RBUF errors are seen:
7387          *      REPLAY_BUF_MBE_SMASK
7388          *      FLIT_INPUT_BUF_MBE_SMASK
7389          */
7390         if (is_ax(dd)) {                        /* fixed in B0 */
7391                 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7392                 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7393                         | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7394                 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7395         }
7396
7397         /* pull LCB fifos out of reset - all fifo clocks must be stable */
7398         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7399
7400         /* give 8051 access to the LCB CSRs */
7401         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7402         set_8051_lcb_access(dd);
7403
7404         ppd->neighbor_guid =
7405                 read_csr(dd, DC_DC8051_STS_REMOTE_GUID);
7406         ppd->neighbor_port_number = read_csr(dd, DC_DC8051_STS_REMOTE_PORT_NO) &
7407                                         DC_DC8051_STS_REMOTE_PORT_NO_VAL_SMASK;
7408         ppd->neighbor_type =
7409                 read_csr(dd, DC_DC8051_STS_REMOTE_NODE_TYPE) &
7410                 DC_DC8051_STS_REMOTE_NODE_TYPE_VAL_MASK;
7411         ppd->neighbor_fm_security =
7412                 read_csr(dd, DC_DC8051_STS_REMOTE_FM_SECURITY) &
7413                 DC_DC8051_STS_LOCAL_FM_SECURITY_DISABLED_MASK;
7414         dd_dev_info(dd,
7415                     "Neighbor Guid: %llx Neighbor type %d MgmtAllowed %d FM security bypass %d\n",
7416                     ppd->neighbor_guid, ppd->neighbor_type,
7417                     ppd->mgmt_allowed, ppd->neighbor_fm_security);
7418         if (ppd->mgmt_allowed)
7419                 add_full_mgmt_pkey(ppd);
7420
7421         /* tell the 8051 to go to LinkUp */
7422         set_link_state(ppd, HLS_GOING_UP);
7423 }
7424
7425 /*
7426  * Apply the link width downgrade enabled policy against the current active
7427  * link widths.
7428  *
7429  * Called when the enabled policy changes or the active link widths change.
7430  */
7431 void apply_link_downgrade_policy(struct hfi1_pportdata *ppd, int refresh_widths)
7432 {
7433         int do_bounce = 0;
7434         int tries;
7435         u16 lwde;
7436         u16 tx, rx;
7437
7438         /* use the hls lock to avoid a race with actual link up */
7439         tries = 0;
7440 retry:
7441         mutex_lock(&ppd->hls_lock);
7442         /* only apply if the link is up */
7443         if (ppd->host_link_state & HLS_DOWN) {
7444                 /* still going up; wait and retry */
7445                 if (ppd->host_link_state & HLS_GOING_UP) {
7446                         if (++tries < 1000) {
7447                                 mutex_unlock(&ppd->hls_lock);
7448                                 usleep_range(100, 120); /* arbitrary */
7449                                 goto retry;
7450                         }
7451                         dd_dev_err(ppd->dd,
7452                                    "%s: giving up waiting for link state change\n",
7453                                    __func__);
7454                 }
7455                 goto done;
7456         }
7457
7458         lwde = ppd->link_width_downgrade_enabled;
7459
7460         if (refresh_widths) {
7461                 get_link_widths(ppd->dd, &tx, &rx);
7462                 ppd->link_width_downgrade_tx_active = tx;
7463                 ppd->link_width_downgrade_rx_active = rx;
7464         }
7465
7466         if (ppd->link_width_downgrade_tx_active == 0 ||
7467             ppd->link_width_downgrade_rx_active == 0) {
7468                 /* the 8051 reported a dead link as a downgrade */
7469                 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7470         } else if (lwde == 0) {
7471                 /* downgrade is disabled */
7472
7473                 /* bounce if not at starting active width */
7474                 if ((ppd->link_width_active !=
7475                      ppd->link_width_downgrade_tx_active) ||
7476                     (ppd->link_width_active !=
7477                      ppd->link_width_downgrade_rx_active)) {
7478                         dd_dev_err(ppd->dd,
7479                                    "Link downgrade is disabled and link has downgraded, downing link\n");
7480                         dd_dev_err(ppd->dd,
7481                                    "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7482                                    ppd->link_width_active,
7483                                    ppd->link_width_downgrade_tx_active,
7484                                    ppd->link_width_downgrade_rx_active);
7485                         do_bounce = 1;
7486                 }
7487         } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7488                    (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7489                 /* Tx or Rx is outside the enabled policy */
7490                 dd_dev_err(ppd->dd,
7491                            "Link is outside of downgrade allowed, downing link\n");
7492                 dd_dev_err(ppd->dd,
7493                            "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7494                            lwde, ppd->link_width_downgrade_tx_active,
7495                            ppd->link_width_downgrade_rx_active);
7496                 do_bounce = 1;
7497         }
7498
7499 done:
7500         mutex_unlock(&ppd->hls_lock);
7501
7502         if (do_bounce) {
7503                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7504                                      OPA_LINKDOWN_REASON_WIDTH_POLICY);
7505                 set_link_state(ppd, HLS_DN_OFFLINE);
7506                 tune_serdes(ppd);
7507                 start_link(ppd);
7508         }
7509 }
7510
7511 /*
7512  * Handle a link downgrade interrupt from the 8051.
7513  *
7514  * This is a work-queue function outside of the interrupt.
7515  */
7516 void handle_link_downgrade(struct work_struct *work)
7517 {
7518         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7519                                                         link_downgrade_work);
7520
7521         dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7522         apply_link_downgrade_policy(ppd, 1);
7523 }
7524
7525 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7526 {
7527         return flag_string(buf, buf_len, flags, dcc_err_flags,
7528                 ARRAY_SIZE(dcc_err_flags));
7529 }
7530
7531 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7532 {
7533         return flag_string(buf, buf_len, flags, lcb_err_flags,
7534                 ARRAY_SIZE(lcb_err_flags));
7535 }
7536
7537 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7538 {
7539         return flag_string(buf, buf_len, flags, dc8051_err_flags,
7540                 ARRAY_SIZE(dc8051_err_flags));
7541 }
7542
7543 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7544 {
7545         return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7546                 ARRAY_SIZE(dc8051_info_err_flags));
7547 }
7548
7549 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7550 {
7551         return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7552                 ARRAY_SIZE(dc8051_info_host_msg_flags));
7553 }
7554
7555 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7556 {
7557         struct hfi1_pportdata *ppd = dd->pport;
7558         u64 info, err, host_msg;
7559         int queue_link_down = 0;
7560         char buf[96];
7561
7562         /* look at the flags */
7563         if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7564                 /* 8051 information set by firmware */
7565                 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7566                 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7567                 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7568                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7569                 host_msg = (info >>
7570                         DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7571                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7572
7573                 /*
7574                  * Handle error flags.
7575                  */
7576                 if (err & FAILED_LNI) {
7577                         /*
7578                          * LNI error indications are cleared by the 8051
7579                          * only when starting polling.  Only pay attention
7580                          * to them when in the states that occur during
7581                          * LNI.
7582                          */
7583                         if (ppd->host_link_state
7584                             & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7585                                 queue_link_down = 1;
7586                                 dd_dev_info(dd, "Link error: %s\n",
7587                                             dc8051_info_err_string(buf,
7588                                                                    sizeof(buf),
7589                                                                    err &
7590                                                                    FAILED_LNI));
7591                         }
7592                         err &= ~(u64)FAILED_LNI;
7593                 }
7594                 /* unknown frames can happen during LNI, just count */
7595                 if (err & UNKNOWN_FRAME) {
7596                         ppd->unknown_frame_count++;
7597                         err &= ~(u64)UNKNOWN_FRAME;
7598                 }
7599                 if (err) {
7600                         /* report remaining errors, but do not do anything */
7601                         dd_dev_err(dd, "8051 info error: %s\n",
7602                                    dc8051_info_err_string(buf, sizeof(buf),
7603                                                           err));
7604                 }
7605
7606                 /*
7607                  * Handle host message flags.
7608                  */
7609                 if (host_msg & HOST_REQ_DONE) {
7610                         /*
7611                          * Presently, the driver does a busy wait for
7612                          * host requests to complete.  This is only an
7613                          * informational message.
7614                          * NOTE: The 8051 clears the host message
7615                          * information *on the next 8051 command*.
7616                          * Therefore, when linkup is achieved,
7617                          * this flag will still be set.
7618                          */
7619                         host_msg &= ~(u64)HOST_REQ_DONE;
7620                 }
7621                 if (host_msg & BC_SMA_MSG) {
7622                         queue_work(ppd->hfi1_wq, &ppd->sma_message_work);
7623                         host_msg &= ~(u64)BC_SMA_MSG;
7624                 }
7625                 if (host_msg & LINKUP_ACHIEVED) {
7626                         dd_dev_info(dd, "8051: Link up\n");
7627                         queue_work(ppd->hfi1_wq, &ppd->link_up_work);
7628                         host_msg &= ~(u64)LINKUP_ACHIEVED;
7629                 }
7630                 if (host_msg & EXT_DEVICE_CFG_REQ) {
7631                         handle_8051_request(ppd);
7632                         host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7633                 }
7634                 if (host_msg & VERIFY_CAP_FRAME) {
7635                         queue_work(ppd->hfi1_wq, &ppd->link_vc_work);
7636                         host_msg &= ~(u64)VERIFY_CAP_FRAME;
7637                 }
7638                 if (host_msg & LINK_GOING_DOWN) {
7639                         const char *extra = "";
7640                         /* no downgrade action needed if going down */
7641                         if (host_msg & LINK_WIDTH_DOWNGRADED) {
7642                                 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7643                                 extra = " (ignoring downgrade)";
7644                         }
7645                         dd_dev_info(dd, "8051: Link down%s\n", extra);
7646                         queue_link_down = 1;
7647                         host_msg &= ~(u64)LINK_GOING_DOWN;
7648                 }
7649                 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7650                         queue_work(ppd->hfi1_wq, &ppd->link_downgrade_work);
7651                         host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7652                 }
7653                 if (host_msg) {
7654                         /* report remaining messages, but do not do anything */
7655                         dd_dev_info(dd, "8051 info host message: %s\n",
7656                                     dc8051_info_host_msg_string(buf,
7657                                                                 sizeof(buf),
7658                                                                 host_msg));
7659                 }
7660
7661                 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7662         }
7663         if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7664                 /*
7665                  * Lost the 8051 heartbeat.  If this happens, we
7666                  * receive constant interrupts about it.  Disable
7667                  * the interrupt after the first.
7668                  */
7669                 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7670                 write_csr(dd, DC_DC8051_ERR_EN,
7671                           read_csr(dd, DC_DC8051_ERR_EN) &
7672                           ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7673
7674                 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7675         }
7676         if (reg) {
7677                 /* report the error, but do not do anything */
7678                 dd_dev_err(dd, "8051 error: %s\n",
7679                            dc8051_err_string(buf, sizeof(buf), reg));
7680         }
7681
7682         if (queue_link_down) {
7683                 /*
7684                  * if the link is already going down or disabled, do not
7685                  * queue another
7686                  */
7687                 if ((ppd->host_link_state &
7688                     (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7689                     ppd->link_enabled == 0) {
7690                         dd_dev_info(dd, "%s: not queuing link down\n",
7691                                     __func__);
7692                 } else {
7693                         queue_work(ppd->hfi1_wq, &ppd->link_down_work);
7694                 }
7695         }
7696 }
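
/*
 * A note on the pattern above: every recognized bit is masked out of
 * 'err', 'host_msg', or 'reg' as it is handled, so the catch-all
 * reports at the end only ever name bits this handler does not know
 * about.  A minimal sketch of the idiom (KNOWN_BIT and the helpers
 * here are illustrative, not real symbols):
 *
 *	if (flags & KNOWN_BIT) {
 *		handle_known_bit();
 *		flags &= ~(u64)KNOWN_BIT;
 *	}
 *	if (flags)
 *		report_leftover_bits(flags);
 */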
7697
7698 static const char * const fm_config_txt[] = {
7699 [0] =
7700         "BadHeadDist: Distance violation between two head flits",
7701 [1] =
7702         "BadTailDist: Distance violation between two tail flits",
7703 [2] =
7704         "BadCtrlDist: Distance violation between two credit control flits",
7705 [3] =
7706         "BadCrdAck: Credits return for unsupported VL",
7707 [4] =
7708         "UnsupportedVLMarker: Received VL Marker",
7709 [5] =
7710         "BadPreempt: Exceeded the preemption nesting level",
7711 [6] =
7712         "BadControlFlit: Received unsupported control flit",
7713 /* no 7 */
7714 [8] =
7715         "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7716 };
7717
7718 static const char * const port_rcv_txt[] = {
7719 [1] =
7720         "BadPktLen: Illegal PktLen",
7721 [2] =
7722         "PktLenTooLong: Packet longer than PktLen",
7723 [3] =
7724         "PktLenTooShort: Packet shorter than PktLen",
7725 [4] =
7726         "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7727 [5] =
7728         "BadDLID: Illegal DLID (0, doesn't match HFI)",
7729 [6] =
7730         "BadL2: Illegal L2 opcode",
7731 [7] =
7732         "BadSC: Unsupported SC",
7733 [9] =
7734         "BadRC: Illegal RC",
7735 [11] =
7736         "PreemptError: Preempting with same VL",
7737 [12] =
7738         "PreemptVL15: Preempting a VL15 packet",
7739 };
7740
7741 #define OPA_LDR_FMCONFIG_OFFSET 16
7742 #define OPA_LDR_PORTRCV_OFFSET 0
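/*
 * The two offsets above position an error-info code within the
 * PortErrorAction bitmask: fmconfig code N maps to bit
 * (OPA_LDR_FMCONFIG_OFFSET + N) and port-receive code N maps to bit N.
 * For example, fmconfig error 3 (BadCrdAck) requests a bounce when bit
 * 19 of port_error_action is set.
 */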
7743 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7744 {
7745         u64 info, hdr0, hdr1;
7746         const char *extra;
7747         char buf[96];
7748         struct hfi1_pportdata *ppd = dd->pport;
7749         u8 lcl_reason = 0;
7750         int do_bounce = 0;
7751
7752         if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7753                 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7754                         info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7755                         dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7756                         /* set status bit */
7757                         dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7758                 }
7759                 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7760         }
7761
7762         if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7763                 struct hfi1_pportdata *ppd = dd->pport;
7764                 /* this counter saturates at (2^32) - 1 */
7765                 if (ppd->link_downed < (u32)UINT_MAX)
7766                         ppd->link_downed++;
7767                 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7768         }
7769
7770         if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7771                 u8 reason_valid = 1;
7772
7773                 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7774                 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7775                         dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7776                         /* set status bit */
7777                         dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7778                 }
7779                 switch (info) {
7780                 case 0:
7781                 case 1:
7782                 case 2:
7783                 case 3:
7784                 case 4:
7785                 case 5:
7786                 case 6:
7787                         extra = fm_config_txt[info];
7788                         break;
7789                 case 8:
7790                         extra = fm_config_txt[info];
7791                         if (ppd->port_error_action &
7792                             OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7793                                 do_bounce = 1;
7794                                 /*
7795                                  * lcl_reason cannot be derived from info
7796                                  * for this error
7797                                  */
7798                                 lcl_reason =
7799                                   OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7800                         }
7801                         break;
7802                 default:
7803                         reason_valid = 0;
7804                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7805                         extra = buf;
7806                         break;
7807                 }
7808
7809                 if (reason_valid && !do_bounce) {
7810                         do_bounce = ppd->port_error_action &
7811                                         (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
7812                         lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
7813                 }
7814
7815                 /* just report this */
7816                 dd_dev_info(dd, "DCC Error: fmconfig error: %s\n", extra);
7817                 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
7818         }
7819
7820         if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
7821                 u8 reason_valid = 1;
7822
7823                 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
7824                 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
7825                 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
7826                 if (!(dd->err_info_rcvport.status_and_code &
7827                       OPA_EI_STATUS_SMASK)) {
7828                         dd->err_info_rcvport.status_and_code =
7829                                 info & OPA_EI_CODE_SMASK;
7830                         /* set status bit */
7831                         dd->err_info_rcvport.status_and_code |=
7832                                 OPA_EI_STATUS_SMASK;
7833                         /*
7834                          * save first 2 flits in the packet that caused
7835                          * the error
7836                          */
7837                         dd->err_info_rcvport.packet_flit1 = hdr0;
7838                         dd->err_info_rcvport.packet_flit2 = hdr1;
7839                 }
7840                 switch (info) {
7841                 case 1:
7842                 case 2:
7843                 case 3:
7844                 case 4:
7845                 case 5:
7846                 case 6:
7847                 case 7:
7848                 case 9:
7849                 case 11:
7850                 case 12:
7851                         extra = port_rcv_txt[info];
7852                         break;
7853                 default:
7854                         reason_valid = 0;
7855                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7856                         extra = buf;
7857                         break;
7858                 }
7859
7860                 if (reason_valid && !do_bounce) {
7861                         do_bounce = ppd->port_error_action &
7862                                         (1 << (OPA_LDR_PORTRCV_OFFSET + info));
7863                         lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
7864                 }
7865
7866                 /* just report this */
7867                 dd_dev_info(dd, "DCC Error: PortRcv error: %s\n", extra);
7868                 dd_dev_info(dd, "           hdr0 0x%llx, hdr1 0x%llx\n",
7869                             hdr0, hdr1);
7870
7871                 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
7872         }
7873
7874         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
7875                 /* informative only */
7876                 dd_dev_info(dd, "8051 access to LCB blocked\n");
7877                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
7878         }
7879         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
7880                 /* informative only */
7881                 dd_dev_info(dd, "host access to LCB blocked\n");
7882                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
7883         }
7884
7885         /* report any remaining errors */
7886         if (reg)
7887                 dd_dev_info(dd, "DCC Error: %s\n",
7888                             dcc_err_string(buf, sizeof(buf), reg));
7889
7890         if (lcl_reason == 0)
7891                 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
7892
7893         if (do_bounce) {
7894                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
7895                 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
7896                 queue_work(ppd->hfi1_wq, &ppd->link_bounce_work);
7897         }
7898 }
7899
7900 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7901 {
7902         char buf[96];
7903
7904         dd_dev_info(dd, "LCB Error: %s\n",
7905                     lcb_err_string(buf, sizeof(buf), reg));
7906 }
7907
7908 /*
7909  * CCE block DC interrupt.  Source is < 8.
7910  */
7911 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
7912 {
7913         const struct err_reg_info *eri = &dc_errs[source];
7914
7915         if (eri->handler) {
7916                 interrupt_clear_down(dd, 0, eri);
7917         } else if (source == 3 /* dc_lbm_int */) {
7918                 /*
7919                  * This indicates that a parity error has occurred on the
7920                  * address/control lines presented to the LBM.  The error
7921                  * is a single pulse, there is no associated error flag,
7922                  * and it is non-maskable.  This is because if a parity
7923                  * error occurs on the request the request is dropped.
7924                  * This should never occur, but it is nice to know if it
7925                  * ever does.
7926                  */
7927                 dd_dev_err(dd, "Parity error in DC LBM block\n");
7928         } else {
7929                 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
7930         }
7931 }
7932
7933 /*
7934  * TX block send credit interrupt.  Source is < 160.
7935  */
7936 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
7937 {
7938         sc_group_release_update(dd, source);
7939 }
7940
7941 /*
7942  * TX block SDMA interrupt.  Source is < 48.
7943  *
7944  * SDMA interrupts are grouped by type:
7945  *
7946  *       0 -  N-1 = SDma
7947  *       N - 2N-1 = SDmaProgress
7948  *      2N - 3N-1 = SDmaIdle
7949  */
7950 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
7951 {
7952         /* what interrupt */
7953         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
7954         /* which engine */
7955         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
7956
7957 #ifdef CONFIG_SDMA_VERBOSITY
7958         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
7959                    slashstrip(__FILE__), __LINE__, __func__);
7960         sdma_dumpstate(&dd->per_sdma[which]);
7961 #endif
7962
7963         if (likely(what < 3 && which < dd->num_sdma)) {
7964                 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
7965         } else {
7966                 /* should not happen */
7967                 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
7968         }
7969 }
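
/*
 * Worked example of the mapping above: the header comment gives
 * source < 48 across three equal groups, so TXE_NUM_SDMA_ENGINES is 16
 * here; source 17 then yields what == 1 (an SDmaProgress interrupt) on
 * engine which == 1.
 */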
7970
7971 /*
7972  * RX block receive available interrupt.  Source is < 160.
7973  */
7974 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
7975 {
7976         struct hfi1_ctxtdata *rcd;
7977         char *err_detail;
7978
7979         if (likely(source < dd->num_rcv_contexts)) {
7980                 rcd = dd->rcd[source];
7981                 if (rcd) {
7982                         if (source < dd->first_user_ctxt)
7983                                 rcd->do_interrupt(rcd, 0);
7984                         else
7985                                 handle_user_interrupt(rcd);
7986                         return; /* OK */
7987                 }
7988                 /* received an interrupt, but no rcd */
7989                 err_detail = "dataless";
7990         } else {
7991                 /* received an interrupt, but are not using that context */
7992                 err_detail = "out of range";
7993         }
7994         dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
7995                    err_detail, source);
7996 }
7997
7998 /*
7999  * RX block receive urgent interrupt.  Source is < 160.
8000  */
8001 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8002 {
8003         struct hfi1_ctxtdata *rcd;
8004         char *err_detail;
8005
8006         if (likely(source < dd->num_rcv_contexts)) {
8007                 rcd = dd->rcd[source];
8008                 if (rcd) {
8009                         /* only pay attention to user urgent interrupts */
8010                         if (source >= dd->first_user_ctxt)
8011                                 handle_user_interrupt(rcd);
8012                         return; /* OK */
8013                 }
8014                 /* received an interrupt, but no rcd */
8015                 err_detail = "dataless";
8016         } else {
8017                 /* received an interrupt, but are not using that context */
8018                 err_detail = "out of range";
8019         }
8020         dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8021                    err_detail, source);
8022 }
8023
8024 /*
8025  * Reserved range interrupt.  Should not be called in normal operation.
8026  */
8027 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8028 {
8029         char name[64];
8030
8031         dd_dev_err(dd, "unexpected %s interrupt\n",
8032                    is_reserved_name(name, sizeof(name), source));
8033 }
8034
8035 static const struct is_table is_table[] = {
8036 /*
8037  * start                 end
8038  *                              name func               interrupt func
8039  */
8040 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8041                                 is_misc_err_name,       is_misc_err_int },
8042 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8043                                 is_sdma_eng_err_name,   is_sdma_eng_err_int },
8044 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8045                                 is_sendctxt_err_name,   is_sendctxt_err_int },
8046 { IS_SDMA_START,             IS_SDMA_END,
8047                                 is_sdma_eng_name,       is_sdma_eng_int },
8048 { IS_VARIOUS_START,          IS_VARIOUS_END,
8049                                 is_various_name,        is_various_int },
8050 { IS_DC_START,       IS_DC_END,
8051                                 is_dc_name,             is_dc_int },
8052 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8053                                 is_rcv_avail_name,      is_rcv_avail_int },
8054 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8055                                 is_rcv_urgent_name,     is_rcv_urgent_int },
8056 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8057                                 is_send_credit_name,    is_send_credit_int},
8058 { IS_RESERVED_START,     IS_RESERVED_END,
8059                                 is_reserved_name,       is_reserved_int},
8060 };
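
/*
 * Note: is_table[] must stay sorted by ascending range with no gaps;
 * is_interrupt() below resolves a source with a single "< end" compare
 * per entry and trusts entry->start as the lower bound.
 */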
8061
8062 /*
8063  * Interrupt source interrupt - called when the given source has an interrupt.
8064  * Source is a bit index into an array of 64-bit integers.
8065  */
8066 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8067 {
8068         const struct is_table *entry;
8069
8070         /* avoids a double compare by walking the table in-order */
8071         for (entry = &is_table[0]; entry->is_name; entry++) {
8072                 if (source < entry->end) {
8073                         trace_hfi1_interrupt(dd, entry, source);
8074                         entry->is_int(dd, source - entry->start);
8075                         return;
8076                 }
8077         }
8078         /* fell off the end */
8079         dd_dev_err(dd, "invalid interrupt source %u\n", source);
8080 }
8081
8082 /*
8083  * General interrupt handler.  This is able to correctly handle
8084  * all interrupts in case INTx is used.
8085  */
8086 static irqreturn_t general_interrupt(int irq, void *data)
8087 {
8088         struct hfi1_devdata *dd = data;
8089         u64 regs[CCE_NUM_INT_CSRS];
8090         u32 bit;
8091         int i;
8092
8093         this_cpu_inc(*dd->int_counter);
8094
8095         /* phase 1: scan and clear all handled interrupts */
8096         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8097                 if (dd->gi_mask[i] == 0) {
8098                         regs[i] = 0;    /* used later */
8099                         continue;
8100                 }
8101                 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8102                                 dd->gi_mask[i];
8103                 /* only clear if anything is set */
8104                 if (regs[i])
8105                         write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8106         }
8107
8108         /* phase 2: call the appropriate handler */
8109         for_each_set_bit(bit, (unsigned long *)&regs[0],
8110                          CCE_NUM_INT_CSRS * 64) {
8111                 is_interrupt(dd, bit);
8112         }
8113
8114         return IRQ_HANDLED;
8115 }
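
/*
 * In phase 2 above, regs[] is scanned as one contiguous bitmap, so a
 * set bit b in CSR word i becomes interrupt source (64 * i + b), which
 * is exactly the "bit index into an array of 64-bit integers" that
 * is_interrupt() expects.
 */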
8116
8117 static irqreturn_t sdma_interrupt(int irq, void *data)
8118 {
8119         struct sdma_engine *sde = data;
8120         struct hfi1_devdata *dd = sde->dd;
8121         u64 status;
8122
8123 #ifdef CONFIG_SDMA_VERBOSITY
8124         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8125                    slashstrip(__FILE__), __LINE__, __func__);
8126         sdma_dumpstate(sde);
8127 #endif
8128
8129         this_cpu_inc(*dd->int_counter);
8130
8131         /* This read_csr is really bad in the hot path */
8132         status = read_csr(dd,
8133                           CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8134                           & sde->imask;
8135         if (likely(status)) {
8136                 /* clear the interrupt(s) */
8137                 write_csr(dd,
8138                           CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8139                           status);
8140
8141                 /* handle the interrupt(s) */
8142                 sdma_engine_interrupt(sde, status);
8143         } else {
8144                 dd_dev_err(dd, "SDMA engine %u interrupt, but no status bits set\n",
8145                            sde->this_idx);
8146         }
8146
8147         return IRQ_HANDLED;
8148 }
8149
8150 /*
8151  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8152  * to ensure that the write completed.  This does NOT guarantee that
8153  * queued DMA writes to memory from the chip are pushed.
8154  */
8155 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8156 {
8157         struct hfi1_devdata *dd = rcd->dd;
8158         u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8159
8160         mmiowb();       /* make sure everything before is written */
8161         write_csr(dd, addr, rcd->imask);
8162         /* force the above write on the chip and get a value back */
8163         (void)read_csr(dd, addr);
8164 }
8165
8166 /* force the receive interrupt */
8167 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8168 {
8169         write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8170 }
8171
8172 /*
8173  * Return non-zero if a packet is present.
8174  *
8175  * This routine is called when rechecking for packets after the RcvAvail
8176  * interrupt has been cleared down.  First, do a quick check of memory for
8177  * a packet present.  If not found, use an expensive CSR read of the context
8178  * tail to determine the actual tail.  The CSR read is necessary because there
8179  * is no method to push pending DMAs to memory other than an interrupt and we
8180  * are trying to determine if we need to force an interrupt.
8181  */
8182 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8183 {
8184         u32 tail;
8185         int present;
8186
8187         if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
8188                 present = (rcd->seq_cnt ==
8189                                 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8190         else /* is RDMA rtail */
8191                 present = (rcd->head != get_rcvhdrtail(rcd));
8192
8193         if (present)
8194                 return 1;
8195
8196                 /* fall back to a CSR read, correct independent of DMA_RTAIL */
8197         tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8198         return rcd->head != tail;
8199 }
8200
8201 /*
8202  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8203  * This routine will try to handle packets immediately (latency), but if
8204  * it finds too many, it will invoke the thread handler (bandwidth).  The
8205  * chip receive interrupt is *not* cleared down until this or the thread (if
8206  * invoked) is finished.  The intent is to avoid extra interrupts while we
8207  * are processing packets anyway.
8208  */
8209 static irqreturn_t receive_context_interrupt(int irq, void *data)
8210 {
8211         struct hfi1_ctxtdata *rcd = data;
8212         struct hfi1_devdata *dd = rcd->dd;
8213         int disposition;
8214         int present;
8215
8216         trace_hfi1_receive_interrupt(dd, rcd->ctxt);
8217         this_cpu_inc(*dd->int_counter);
8218         aspm_ctx_disable(rcd);
8219
8220         /* receive interrupt remains blocked while processing packets */
8221         disposition = rcd->do_interrupt(rcd, 0);
8222
8223         /*
8224          * Too many packets were seen while processing packets in this
8225          * IRQ handler.  Invoke the handler thread.  The receive interrupt
8226          * remains blocked.
8227          */
8228         if (disposition == RCV_PKT_LIMIT)
8229                 return IRQ_WAKE_THREAD;
8230
8231         /*
8232          * The packet processor detected no more packets.  Clear the receive
8233  * interrupt and recheck for a packet that may have arrived
8234          * after the previous check and interrupt clear.  If a packet arrived,
8235          * force another interrupt.
8236          */
8237         clear_recv_intr(rcd);
8238         present = check_packet_present(rcd);
8239         if (present)
8240                 force_recv_intr(rcd);
8241
8242         return IRQ_HANDLED;
8243 }
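
/*
 * Returning IRQ_WAKE_THREAD above defers to receive_context_thread()
 * below through the kernel's threaded-IRQ mechanism; the two handlers
 * are presumably registered as a pair (request_threaded_irq()) so the
 * thread always runs with the chip interrupt still blocked.
 */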
8244
8245 /*
8246  * Receive packet thread handler.  This expects to be invoked with the
8247  * receive interrupt still blocked.
8248  */
8249 static irqreturn_t receive_context_thread(int irq, void *data)
8250 {
8251         struct hfi1_ctxtdata *rcd = data;
8252         int present;
8253
8254         /* receive interrupt is still blocked from the IRQ handler */
8255         (void)rcd->do_interrupt(rcd, 1);
8256
8257         /*
8258          * The packet processor will only return if it detected no more
8259          * packets.  Hold IRQs here so we can safely clear the interrupt and
8260          * recheck for a packet that may have arrived after the previous
8261          * check and the interrupt clear.  If a packet arrived, force another
8262          * interrupt.
8263          */
8264         local_irq_disable();
8265         clear_recv_intr(rcd);
8266         present = check_packet_present(rcd);
8267         if (present)
8268                 force_recv_intr(rcd);
8269         local_irq_enable();
8270
8271         return IRQ_HANDLED;
8272 }
8273
8274 /* ========================================================================= */
8275
8276 u32 read_physical_state(struct hfi1_devdata *dd)
8277 {
8278         u64 reg;
8279
8280         reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8281         return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8282                                 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8283 }
8284
8285 u32 read_logical_state(struct hfi1_devdata *dd)
8286 {
8287         u64 reg;
8288
8289         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8290         return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8291                                 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8292 }
8293
8294 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8295 {
8296         u64 reg;
8297
8298         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8299         /* clear current state, set new state */
8300         reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8301         reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8302         write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8303 }
8304
8305 /*
8306  * Use the 8051 to read an LCB CSR.
8307  */
8308 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8309 {
8310         u32 regno;
8311         int ret;
8312
8313         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8314                 if (acquire_lcb_access(dd, 0) == 0) {
8315                         *data = read_csr(dd, addr);
8316                         release_lcb_access(dd, 0);
8317                         return 0;
8318                 }
8319                 return -EBUSY;
8320         }
8321
8322         /* register is an index of LCB registers: (offset - base) / 8 */
8323         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8324         ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8325         if (ret != HCMD_SUCCESS)
8326                 return -EBUSY;
8327         return 0;
8328 }
8329
8330 /*
8331  * Read an LCB CSR.  Access may not be in host control, so check.
8332  * Return 0 on success, -EBUSY on failure.
8333  */
8334 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8335 {
8336         struct hfi1_pportdata *ppd = dd->pport;
8337
8338         /* if up, go through the 8051 for the value */
8339         if (ppd->host_link_state & HLS_UP)
8340                 return read_lcb_via_8051(dd, addr, data);
8341         /* if going up or down, no access */
8342         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8343                 return -EBUSY;
8344         /* otherwise, host has access */
8345         *data = read_csr(dd, addr);
8346         return 0;
8347 }
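
/*
 * Illustrative use of the accessor above (a hypothetical call site,
 * not one from this file):
 *
 *	u64 val;
 *
 *	if (read_lcb_csr(dd, DC_LCB_CFG_RUN, &val))
 *		dd_dev_err(dd, "LCB CSR not accessible\n");
 */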
8348
8349 /*
8350  * Use the 8051 to write an LCB CSR.
8351  */
8352 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8353 {
8354         u32 regno;
8355         int ret;
8356
8357         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8358             (dd->dc8051_ver < dc8051_ver(0, 20))) {
8359                 if (acquire_lcb_access(dd, 0) == 0) {
8360                         write_csr(dd, addr, data);
8361                         release_lcb_access(dd, 0);
8362                         return 0;
8363                 }
8364                 return -EBUSY;
8365         }
8366
8367         /* register is an index of LCB registers: (offset - base) / 8 */
8368         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8369         ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8370         if (ret != HCMD_SUCCESS)
8371                 return -EBUSY;
8372         return 0;
8373 }
8374
8375 /*
8376  * Write an LCB CSR.  Access may not be in host control, so check.
8377  * Return 0 on success, -EBUSY on failure.
8378  */
8379 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8380 {
8381         struct hfi1_pportdata *ppd = dd->pport;
8382
8383         /* if up, go through the 8051 for the value */
8384         if (ppd->host_link_state & HLS_UP)
8385                 return write_lcb_via_8051(dd, addr, data);
8386         /* if going up or down, no access */
8387         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8388                 return -EBUSY;
8389         /* otherwise, host has access */
8390         write_csr(dd, addr, data);
8391         return 0;
8392 }
8393
8394 /*
8395  * Returns:
8396  *      < 0 = Linux error, not able to get access
8397  *      > 0 = 8051 command RETURN_CODE
8398  */
8399 static int do_8051_command(
8400         struct hfi1_devdata *dd,
8401         u32 type,
8402         u64 in_data,
8403         u64 *out_data)
8404 {
8405         u64 reg, completed;
8406         int return_code;
8407         unsigned long flags;
8408         unsigned long timeout;
8409
8410         hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8411
8412         /*
8413          * Alternative to holding the lock for a long time:
8414          * - keep the busy wait and have other users bounce off
8415          */
8416         spin_lock_irqsave(&dd->dc8051_lock, flags);
8417
8418         /* We can't send any commands to the 8051 if it's in reset */
8419         if (dd->dc_shutdown) {
8420                 return_code = -ENODEV;
8421                 goto fail;
8422         }
8423
8424         /*
8425          * If an 8051 host command timed out previously, then the 8051 is
8426          * stuck.
8427          *
8428          * On first timeout, attempt to reset and restart the entire DC
8429          * block (including 8051). (Is this too big of a hammer?)
8430          *
8431          * If the 8051 times out a second time, the reset did not bring it
8432          * back to healthy life. In that case, fail any subsequent commands.
8433          */
8434         if (dd->dc8051_timed_out) {
8435                 if (dd->dc8051_timed_out > 1) {
8436                         dd_dev_err(dd,
8437                                    "Previous 8051 host command timed out, skipping command %u\n",
8438                                    type);
8439                         return_code = -ENXIO;
8440                         goto fail;
8441                 }
8442                 spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8443                 dc_shutdown(dd);
8444                 dc_start(dd);
8445                 spin_lock_irqsave(&dd->dc8051_lock, flags);
8446         }
8447
8448         /*
8449          * If there is no timeout, then the 8051 command interface is
8450          * waiting for a command.
8451          */
8452
8453         /*
8454          * When writing an LCB CSR, out_data contains the full value
8455          * to be written, while in_data contains the relative LCB
8456          * address in 7:0.  Do the work here, rather than in the caller,
8457          * of distributing the write data to where it needs to go:
8458          *
8459          * Write data
8460          *   39:00 -> in_data[47:8]
8461          *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8462          *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8463          */
8464         if (type == HCMD_WRITE_LCB_CSR) {
8465                 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8466                 reg = ((((*out_data) >> 40) & 0xff) <<
8467                                 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8468                       | ((((*out_data) >> 48) & 0xffff) <<
8469                                 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8470                 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8471         }
8472
8473         /*
8474          * Do two writes: the first to stabilize the type and req_data, the
8475          * second to activate.
8476          */
8477         reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8478                         << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8479                 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8480                         << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8481         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8482         reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8483         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8484
8485         /* wait for completion, alternate: interrupt */
8486         timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8487         while (1) {
8488                 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8489                 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8490                 if (completed)
8491                         break;
8492                 if (time_after(jiffies, timeout)) {
8493                         dd->dc8051_timed_out++;
8494                         dd_dev_err(dd, "8051 host command %u timeout\n", type);
8495                         if (out_data)
8496                                 *out_data = 0;
8497                         return_code = -ETIMEDOUT;
8498                         goto fail;
8499                 }
8500                 udelay(2);
8501         }
8502
8503         if (out_data) {
8504                 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8505                                 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8506                 if (type == HCMD_READ_LCB_CSR) {
8507                         /* top 16 bits are in a different register */
8508                         *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8509                                 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8510                                 << (48
8511                                     - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8512                 }
8513         }
8514         return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8515                                 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8516         dd->dc8051_timed_out = 0;
8517         /*
8518          * Clear command for next user.
8519          */
8520         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8521
8522 fail:
8523         spin_unlock_irqrestore(&dd->dc8051_lock, flags);
8524
8525         return return_code;
8526 }
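
/*
 * Worked example of the HCMD_WRITE_LCB_CSR packing handled above: to
 * write 64-bit value V to LCB register index R, the caller passes R in
 * in_data[7:0] and V via *out_data; this routine then splices V[39:0]
 * into in_data[47:8] and parks V[47:40] and V[63:48] in
 * DC8051_CFG_EXT_DEV_0 before issuing the command.
 */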
8527
8528 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8529 {
8530         return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8531 }
8532
8533 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8534                      u8 lane_id, u32 config_data)
8535 {
8536         u64 data;
8537         int ret;
8538
8539         data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8540                 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8541                 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8542         ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8543         if (ret != HCMD_SUCCESS) {
8544                 dd_dev_err(dd,
8545                            "load 8051 config: field id %d, lane %d, err %d\n",
8546                            (int)field_id, (int)lane_id, ret);
8547         }
8548         return ret;
8549 }
8550
8551 /*
8552  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8553  * set the result, even on error.
8554  * Return 0 on success, -errno on failure
8555  */
8556 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8557                      u32 *result)
8558 {
8559         u64 big_data;
8560         u32 addr;
8561         int ret;
8562
8563         /* address start depends on the lane_id */
8564         if (lane_id < 4)
8565                 addr = (4 * NUM_GENERAL_FIELDS)
8566                         + (lane_id * 4 * NUM_LANE_FIELDS);
8567         else
8568                 addr = 0;
8569         addr += field_id * 4;
8570
8571         /* read is in 8-byte chunks, hardware will truncate the address down */
8572         ret = read_8051_data(dd, addr, 8, &big_data);
8573
8574         if (ret == 0) {
8575                 /* extract the 4 bytes we want */
8576                 if (addr & 0x4)
8577                         *result = (u32)(big_data >> 32);
8578                 else
8579                         *result = (u32)big_data;
8580         } else {
8581                 *result = 0;
8582                 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8583                            __func__, lane_id, field_id);
8584         }
8585
8586         return ret;
8587 }
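
/*
 * Example of the addressing above: lane 2, field 1 reads from
 * addr = 4 * NUM_GENERAL_FIELDS + 2 * 4 * NUM_LANE_FIELDS + 4.  The
 * 8-byte read returns two adjacent 4-byte fields and bit 2 of the
 * (pre-truncation) address selects the upper word.
 */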
8588
8589 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8590                               u8 continuous)
8591 {
8592         u32 frame;
8593
8594         frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8595                 | power_management << POWER_MANAGEMENT_SHIFT;
8596         return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8597                                 GENERAL_CONFIG, frame);
8598 }
8599
8600 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8601                                  u16 vl15buf, u8 crc_sizes)
8602 {
8603         u32 frame;
8604
8605         frame = (u32)vau << VAU_SHIFT
8606                 | (u32)z << Z_SHIFT
8607                 | (u32)vcu << VCU_SHIFT
8608                 | (u32)vl15buf << VL15BUF_SHIFT
8609                 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8610         return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8611                                 GENERAL_CONFIG, frame);
8612 }
8613
8614 static void read_vc_local_link_width(struct hfi1_devdata *dd, u8 *misc_bits,
8615                                      u8 *flag_bits, u16 *link_widths)
8616 {
8617         u32 frame;
8618
8619         read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8620                          &frame);
8621         *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8622         *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8623         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8624 }
8625
8626 static int write_vc_local_link_width(struct hfi1_devdata *dd,
8627                                      u8 misc_bits,
8628                                      u8 flag_bits,
8629                                      u16 link_widths)
8630 {
8631         u32 frame;
8632
8633         frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8634                 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8635                 | (u32)link_widths << LINK_WIDTH_SHIFT;
8636         return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_WIDTH, GENERAL_CONFIG,
8637                      frame);
8638 }
8639
8640 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8641                                  u8 device_rev)
8642 {
8643         u32 frame;
8644
8645         frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8646                 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8647         return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8648 }
8649
8650 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8651                                   u8 *device_rev)
8652 {
8653         u32 frame;
8654
8655         read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8656         *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8657         *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8658                         & REMOTE_DEVICE_REV_MASK;
8659 }
8660
8661 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_a, u8 *ver_b)
8662 {
8663         u32 frame;
8664
8665         read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8666         *ver_a = (frame >> STS_FM_VERSION_A_SHIFT) & STS_FM_VERSION_A_MASK;
8667         *ver_b = (frame >> STS_FM_VERSION_B_SHIFT) & STS_FM_VERSION_B_MASK;
8668 }
8669
8670 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8671                                u8 *continuous)
8672 {
8673         u32 frame;
8674
8675         read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8676         *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8677                                         & POWER_MANAGEMENT_MASK;
8678         *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8679                                         & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8680 }
8681
8682 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8683                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8684 {
8685         u32 frame;
8686
8687         read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8688         *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8689         *z = (frame >> Z_SHIFT) & Z_MASK;
8690         *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8691         *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8692         *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8693 }
8694
8695 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8696                                       u8 *remote_tx_rate,
8697                                       u16 *link_widths)
8698 {
8699         u32 frame;
8700
8701         read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8702                          &frame);
8703         *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8704                                 & REMOTE_TX_RATE_MASK;
8705         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8706 }
8707
8708 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8709 {
8710         u32 frame;
8711
8712         read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8713         *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8714 }
8715
8716 static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
8717 {
8718         u32 frame;
8719
8720         read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
8721         *mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
8722 }
8723
8724 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8725 {
8726         read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8727 }
8728
8729 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8730 {
8731         read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8732 }
8733
8734 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
8735 {
8736         u32 frame;
8737         int ret;
8738
8739         *link_quality = 0;
8740         if (dd->pport->host_link_state & HLS_UP) {
8741                 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
8742                                        &frame);
8743                 if (ret == 0)
8744                         *link_quality = (frame >> LINK_QUALITY_SHIFT)
8745                                                 & LINK_QUALITY_MASK;
8746         }
8747 }
8748
8749 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
8750 {
8751         u32 frame;
8752
8753         read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
8754         *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
8755 }
8756
8757 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
8758 {
8759         u32 frame;
8760
8761         read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
8762         *ldr = (frame & 0xff);
8763 }
8764
8765 static int read_tx_settings(struct hfi1_devdata *dd,
8766                             u8 *enable_lane_tx,
8767                             u8 *tx_polarity_inversion,
8768                             u8 *rx_polarity_inversion,
8769                             u8 *max_rate)
8770 {
8771         u32 frame;
8772         int ret;
8773
8774         ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
8775         *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
8776                                 & ENABLE_LANE_TX_MASK;
8777         *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
8778                                 & TX_POLARITY_INVERSION_MASK;
8779         *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
8780                                 & RX_POLARITY_INVERSION_MASK;
8781         *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
8782         return ret;
8783 }
8784
8785 static int write_tx_settings(struct hfi1_devdata *dd,
8786                              u8 enable_lane_tx,
8787                              u8 tx_polarity_inversion,
8788                              u8 rx_polarity_inversion,
8789                              u8 max_rate)
8790 {
8791         u32 frame;
8792
8793         /* no need to mask, all variable sizes match field widths */
8794         frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
8795                 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
8796                 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
8797                 | max_rate << MAX_RATE_SHIFT;
8798         return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
8799 }
8800
8801 static void check_fabric_firmware_versions(struct hfi1_devdata *dd)
8802 {
8803         u32 frame, version, prod_id;
8804         int ret, lane;
8805
8806         /* 4 lanes */
8807         for (lane = 0; lane < 4; lane++) {
8808                 ret = read_8051_config(dd, SPICO_FW_VERSION, lane, &frame);
8809                 if (ret) {
8810                         dd_dev_err(dd,
8811                                    "Unable to read lane %d firmware details\n",
8812                                    lane);
8813                         continue;
8814                 }
8815                 version = (frame >> SPICO_ROM_VERSION_SHIFT)
8816                                         & SPICO_ROM_VERSION_MASK;
8817                 prod_id = (frame >> SPICO_ROM_PROD_ID_SHIFT)
8818                                         & SPICO_ROM_PROD_ID_MASK;
8819                 dd_dev_info(dd,
8820                             "Lane %d firmware: version 0x%04x, prod_id 0x%04x\n",
8821                             lane, version, prod_id);
8822         }
8823 }
8824
8825 /*
8826  * Read an idle LCB message.
8827  *
8828  * Returns 0 on success, -EINVAL on error
8829  */
8830 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
8831 {
8832         int ret;
8833
8834         ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
8835         if (ret != HCMD_SUCCESS) {
8836                 dd_dev_err(dd, "read idle message: type %d, err %d\n",
8837                            (u32)type, ret);
8838                 return -EINVAL;
8839         }
8840         dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
8841         /* return only the payload as we already know the type */
8842         *data_out >>= IDLE_PAYLOAD_SHIFT;
8843         return 0;
8844 }
8845
8846 /*
8847  * Read an idle SMA message.  To be done in response to a notification from
8848  * the 8051.
8849  *
8850  * Returns 0 on success, -EINVAL on error
8851  */
8852 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
8853 {
8854         return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
8855                                  data);
8856 }
8857
8858 /*
8859  * Send an idle LCB message.
8860  *
8861  * Returns 0 on success, -EINVAL on error
8862  */
8863 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
8864 {
8865         int ret;
8866
8867         dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
8868         ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
8869         if (ret != HCMD_SUCCESS) {
8870                 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
8871                            data, ret);
8872                 return -EINVAL;
8873         }
8874         return 0;
8875 }
8876
8877 /*
8878  * Send an idle SMA message.
8879  *
8880  * Returns 0 on success, -EINVAL on error
8881  */
8882 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
8883 {
8884         u64 data;
8885
8886         data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
8887                 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
8888         return send_idle_message(dd, data);
8889 }
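
/*
 * For example, send_idle_sma(dd, 3) builds an idle message with payload
 * 3 in the IDLE_PAYLOAD field and IDLE_SMA in the type field; on the
 * receive side, read_idle_sma() shifts the payload back down per the
 * "return only the payload" note in read_idle_message().
 */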
8890
8891 /*
8892  * Initialize the LCB then do a quick link up.  This may or may not be
8893  * in loopback.
8894  *
8895  * return 0 on success, -errno on error
8896  */
8897 static int do_quick_linkup(struct hfi1_devdata *dd)
8898 {
8899         u64 reg;
8900         unsigned long timeout;
8901         int ret;
8902
8903         lcb_shutdown(dd, 0);
8904
8905         if (loopback) {
8906                 /* LCB_CFG_LOOPBACK.VAL = 2 */
8907                 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
8908                 write_csr(dd, DC_LCB_CFG_LOOPBACK,
8909                           IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
8910                 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
8911         }
8912
8913         /* start the LCBs */
8914         /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
8915         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
8916
8917         /* simulator only loopback steps */
8918         if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8919                 /* LCB_CFG_RUN.EN = 1 */
8920                 write_csr(dd, DC_LCB_CFG_RUN,
8921                           1ull << DC_LCB_CFG_RUN_EN_SHIFT);
8922
8923                 /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
8924                 timeout = jiffies + msecs_to_jiffies(10);
8925                 while (1) {
8926                         reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
8927                         if (reg)
8928                                 break;
8929                         if (time_after(jiffies, timeout)) {
8930                                 dd_dev_err(dd,
8931                                            "timeout waiting for LINK_TRANSFER_ACTIVE\n");
8932                                 return -ETIMEDOUT;
8933                         }
8934                         udelay(2);
8935                 }
8936
8937                 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
8938                           1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
8939         }
8940
8941         if (!loopback) {
8942                 /*
8943                  * When doing quick linkup and not in loopback, both
8944                  * sides must be done with LCB set-up before either
8945                  * starts the quick linkup.  Put a delay here so that
8946                  * both sides can be started and have a chance to be
8947                  * done with LCB set up before resuming.
8948                  */
8949                 dd_dev_err(dd,
8950                            "Pausing for peer to be finished with LCB set up\n");
8951                 msleep(5000);
8952                 dd_dev_err(dd, "Continuing with quick linkup\n");
8953         }
8954
8955         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
8956         set_8051_lcb_access(dd);
8957
8958         /*
8959          * State "quick" LinkUp request sets the physical link state to
8960          * LinkUp without a verify capability sequence.
8961          * This state is in simulator v37 and later.
8962          */
8963         ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
8964         if (ret != HCMD_SUCCESS) {
8965                 dd_dev_err(dd,
8966                            "%s: set physical link state to quick LinkUp failed with return %d\n",
8967                            __func__, ret);
8968
8969                 set_host_lcb_access(dd);
8970                 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
8971
8972                 if (ret >= 0)
8973                         ret = -EINVAL;
8974                 return ret;
8975         }
8976
8977         return 0; /* success */
8978 }
8979
8980 /*
8981  * Set the SerDes to internal loopback mode.
8982  * Returns 0 on success, -errno on error.
8983  */
8984 static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
8985 {
8986         int ret;
8987
8988         ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
8989         if (ret == HCMD_SUCCESS)
8990                 return 0;
8991         dd_dev_err(dd,
8992                    "Set physical link state to SerDes Loopback failed with return %d\n",
8993                    ret);
8994         if (ret >= 0)
8995                 ret = -EINVAL;
8996         return ret;
8997 }
8998
8999 /*
9000  * Do all special steps to set up loopback.
9001  */
9002 static int init_loopback(struct hfi1_devdata *dd)
9003 {
9004         dd_dev_info(dd, "Entering loopback mode\n");
9005
9006         /* all loopbacks should disable self GUID check */
9007         write_csr(dd, DC_DC8051_CFG_MODE,
9008                   (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9009
9010         /*
9011          * The simulator has only one loopback option - LCB.  Switch
9012          * to that option, which includes quick link up.
9013          *
9014          * Accept all valid loopback values.
9015          */
9016         if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9017             (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9018              loopback == LOOPBACK_CABLE)) {
9019                 loopback = LOOPBACK_LCB;
9020                 quick_linkup = 1;
9021                 return 0;
9022         }
9023
9024         /* handle serdes loopback */
9025         if (loopback == LOOPBACK_SERDES) {
                /* internal serdes loopback needs quick linkup on RTL */
9027                 if (dd->icode == ICODE_RTL_SILICON)
9028                         quick_linkup = 1;
9029                 return set_serdes_loopback_mode(dd);
9030         }
9031
9032         /* LCB loopback - handled at poll time */
9033         if (loopback == LOOPBACK_LCB) {
9034                 quick_linkup = 1; /* LCB is always quick linkup */
9035
9036                 /* not supported in emulation due to emulation RTL changes */
9037                 if (dd->icode == ICODE_FPGA_EMULATION) {
9038                         dd_dev_err(dd,
9039                                    "LCB loopback not supported in emulation\n");
9040                         return -EINVAL;
9041                 }
9042                 return 0;
9043         }
9044
9045         /* external cable loopback requires no extra steps */
9046         if (loopback == LOOPBACK_CABLE)
9047                 return 0;
9048
9049         dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9050         return -EINVAL;
9051 }
9052
9053 /*
9054  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9055  * used in the Verify Capability link width attribute.
9056  */
9057 static u16 opa_to_vc_link_widths(u16 opa_widths)
9058 {
9059         int i;
9060         u16 result = 0;
9061
9062         static const struct link_bits {
9063                 u16 from;
9064                 u16 to;
9065         } opa_link_xlate[] = {
9066                 { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9067                 { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9068                 { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9069                 { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9070         };
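        /*
         * For example, an OPA width mask of (1X | 4X) translates to
         * 0b1001 in VC terms: each NX width sets bit (N - 1).
         */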
9071
9072         for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9073                 if (opa_widths & opa_link_xlate[i].from)
9074                         result |= opa_link_xlate[i].to;
9075         }
9076         return result;
9077 }
9078
9079 /*
9080  * Set link attributes before moving to polling.
9081  */
9082 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9083 {
9084         struct hfi1_devdata *dd = ppd->dd;
9085         u8 enable_lane_tx;
9086         u8 tx_polarity_inversion;
9087         u8 rx_polarity_inversion;
9088         int ret;
9089
9090         /* reset our fabric serdes to clear any lingering problems */
9091         fabric_serdes_reset(dd);
9092
9093         /* set the local tx rate - need to read-modify-write */
9094         ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9095                                &rx_polarity_inversion, &ppd->local_tx_rate);
9096         if (ret)
9097                 goto set_local_link_attributes_fail;
9098
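        /*
         * Rate encoding differs by 8051 firmware version: before 0.20,
         * a single value selects the rate (1 = 25 Gb/s, 0 = 12.5 Gb/s);
         * from 0.20 on, it is a mask of all enabled rates
         * (bit 0 = 12.5 Gb/s, bit 1 = 25 Gb/s).
         */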
9099         if (dd->dc8051_ver < dc8051_ver(0, 20)) {
9100                 /* set the tx rate to the fastest enabled */
9101                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9102                         ppd->local_tx_rate = 1;
9103                 else
9104                         ppd->local_tx_rate = 0;
9105         } else {
9106                 /* set the tx rate to all enabled */
9107                 ppd->local_tx_rate = 0;
9108                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9109                         ppd->local_tx_rate |= 2;
9110                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9111                         ppd->local_tx_rate |= 1;
9112         }
9113
9114         enable_lane_tx = 0xF; /* enable all four lanes */
9115         ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9116                                 rx_polarity_inversion, ppd->local_tx_rate);
9117         if (ret != HCMD_SUCCESS)
9118                 goto set_local_link_attributes_fail;
9119
9120         /*
9121          * DC supports continuous updates.
9122          */
9123         ret = write_vc_local_phy(dd,
9124                                  0 /* no power management */,
9125                                  1 /* continuous updates */);
9126         if (ret != HCMD_SUCCESS)
9127                 goto set_local_link_attributes_fail;
9128
9129         /* z=1 in the next call: AU of 0 is not supported by the hardware */
9130         ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9131                                     ppd->port_crc_mode_enabled);
9132         if (ret != HCMD_SUCCESS)
9133                 goto set_local_link_attributes_fail;
9134
9135         ret = write_vc_local_link_width(dd, 0, 0,
9136                                         opa_to_vc_link_widths(
9137                                                 ppd->link_width_enabled));
9138         if (ret != HCMD_SUCCESS)
9139                 goto set_local_link_attributes_fail;
9140
9141         /* let peer know who we are */
9142         ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9143         if (ret == HCMD_SUCCESS)
9144                 return 0;
9145
9146 set_local_link_attributes_fail:
9147         dd_dev_err(dd,
9148                    "Failed to set local link attributes, return 0x%x\n",
9149                    ret);
9150         return ret;
9151 }
9152
9153 /*
9154  * Call this to start the link.
9155  * Do not do anything if the link is disabled.
 * Returns 0 if the link is disabled, the driver is not ready, or the
 * link was successfully moved to polling; otherwise -errno.
9157  */
9158 int start_link(struct hfi1_pportdata *ppd)
9159 {
9160         if (!ppd->link_enabled) {
9161                 dd_dev_info(ppd->dd,
9162                             "%s: stopping link start because link is disabled\n",
9163                             __func__);
9164                 return 0;
9165         }
9166         if (!ppd->driver_link_ready) {
9167                 dd_dev_info(ppd->dd,
9168                             "%s: stopping link start because driver is not ready\n",
9169                             __func__);
9170                 return 0;
9171         }
9172
9173         /*
9174          * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9175          * pkey table can be configured properly if the HFI unit is connected
 * to a switch port with MgmtAllowed=NO
9177          */
9178         clear_full_mgmt_pkey(ppd);
9179
9180         return set_link_state(ppd, HLS_DN_POLL);
9181 }
9182
9183 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9184 {
9185         struct hfi1_devdata *dd = ppd->dd;
9186         u64 mask;
9187         unsigned long timeout;
9188
9189         /*
9190          * Check for QSFP interrupt for t_init (SFF 8679)
9191          */
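        /*
         * IntN is active low: the module asserts it (drives the pin low)
         * once its initialization completes, so poll the input register
         * for a clear bit, then clear the latched interrupt status.
         */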
9192         timeout = jiffies + msecs_to_jiffies(2000);
9193         while (1) {
9194                 mask = read_csr(dd, dd->hfi1_id ?
9195                                 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9196                 if (!(mask & QSFP_HFI0_INT_N)) {
9197                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR :
9198                                   ASIC_QSFP1_CLEAR, QSFP_HFI0_INT_N);
9199                         break;
9200                 }
9201                 if (time_after(jiffies, timeout)) {
9202                         dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9203                                     __func__);
9204                         break;
9205                 }
9206                 udelay(2);
9207         }
9208 }
9209
9210 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9211 {
9212         struct hfi1_devdata *dd = ppd->dd;
9213         u64 mask;
9214
9215         mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9216         if (enable)
9217                 mask |= (u64)QSFP_HFI0_INT_N;
9218         else
9219                 mask &= ~(u64)QSFP_HFI0_INT_N;
9220         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9221 }
9222
9223 void reset_qsfp(struct hfi1_pportdata *ppd)
9224 {
9225         struct hfi1_devdata *dd = ppd->dd;
9226         u64 mask, qsfp_mask;
9227
9228         /* Disable INT_N from triggering QSFP interrupts */
9229         set_qsfp_int_n(ppd, 0);
9230
9231         /* Reset the QSFP */
9232         mask = (u64)QSFP_HFI0_RESET_N;
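        /*
         * RESET_N is active low: drive the output pin low, hold it for
         * 10us, then release it to start the module reset sequence.
         */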
9233
9234         qsfp_mask = read_csr(dd,
9235                              dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9236         qsfp_mask &= ~mask;
9237         write_csr(dd,
9238                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9239
9240         udelay(10);
9241
9242         qsfp_mask |= mask;
9243         write_csr(dd,
9244                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9245
9246         wait_for_qsfp_init(ppd);
9247
9248         /*
9249          * Allow INT_N to trigger the QSFP interrupt to watch
9250          * for alarms and warnings
9251          */
9252         set_qsfp_int_n(ppd, 1);
9253 }
9254
9255 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9256                                         u8 *qsfp_interrupt_status)
9257 {
9258         struct hfi1_devdata *dd = ppd->dd;
9259
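        /*
         * qsfp_interrupt_status[] is a copy of QSFP memory map bytes
         * 6..21 (the interrupt flag bytes), so index i below refers to
         * module byte 6 + i.
         */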
9260         if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9261             (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
                dd_dev_info(dd, "%s: QSFP cable temperature too high\n",
9263                             __func__);
9264
9265         if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9266             (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9267                 dd_dev_info(dd, "%s: QSFP cable temperature too low\n",
9268                             __func__);
9269
9270         /*
9271          * The remaining alarms/warnings don't matter if the link is down.
9272          */
9273         if (ppd->host_link_state & HLS_DOWN)
9274                 return 0;
9275
9276         if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9277             (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9278                 dd_dev_info(dd, "%s: QSFP supply voltage too high\n",
9279                             __func__);
9280
9281         if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9282             (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9283                 dd_dev_info(dd, "%s: QSFP supply voltage too low\n",
9284                             __func__);
9285
9286         /* Byte 2 is vendor specific */
9287
9288         if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9289             (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9290                 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too high\n",
9291                             __func__);
9292
9293         if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9294             (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9295                 dd_dev_info(dd, "%s: Cable RX channel 1/2 power too low\n",
9296                             __func__);
9297
9298         if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9299             (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9300                 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too high\n",
9301                             __func__);
9302
9303         if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9304             (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9305                 dd_dev_info(dd, "%s: Cable RX channel 3/4 power too low\n",
9306                             __func__);
9307
9308         if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9309             (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9310                 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too high\n",
9311                             __func__);
9312
9313         if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9314             (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9315                 dd_dev_info(dd, "%s: Cable TX channel 1/2 bias too low\n",
9316                             __func__);
9317
9318         if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9319             (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9320                 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too high\n",
9321                             __func__);
9322
9323         if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9324             (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9325                 dd_dev_info(dd, "%s: Cable TX channel 3/4 bias too low\n",
9326                             __func__);
9327
9328         if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9329             (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9330                 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too high\n",
9331                             __func__);
9332
9333         if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9334             (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9335                 dd_dev_info(dd, "%s: Cable TX channel 1/2 power too low\n",
9336                             __func__);
9337
9338         if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9339             (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9340                 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too high\n",
9341                             __func__);
9342
9343         if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9344             (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9345                 dd_dev_info(dd, "%s: Cable TX channel 3/4 power too low\n",
9346                             __func__);
9347
9348         /* Bytes 9-10 and 11-12 are reserved */
9349         /* Bytes 13-15 are vendor specific */
9350
9351         return 0;
9352 }
9353
/*
 * This routine will only be scheduled if the QSFP module present signal
 * is asserted.
 */
9355 void qsfp_event(struct work_struct *work)
9356 {
9357         struct qsfp_data *qd;
9358         struct hfi1_pportdata *ppd;
9359         struct hfi1_devdata *dd;
9360
9361         qd = container_of(work, struct qsfp_data, qsfp_work);
9362         ppd = qd->ppd;
9363         dd = ppd->dd;
9364
9365         /* Sanity check */
9366         if (!qsfp_mod_present(ppd))
9367                 return;
9368
9369         /*
9370          * Turn DC back on after cable has been re-inserted. Up until
9371          * now, the DC has been in reset to save power.
9372          */
9373         dc_start(dd);
9374
9375         if (qd->cache_refresh_required) {
9376                 set_qsfp_int_n(ppd, 0);
9377
9378                 wait_for_qsfp_init(ppd);
9379
9380                 /*
9381                  * Allow INT_N to trigger the QSFP interrupt to watch
9382                  * for alarms and warnings
9383                  */
9384                 set_qsfp_int_n(ppd, 1);
9385
9386                 tune_serdes(ppd);
9387
9388                 start_link(ppd);
9389         }
9390
9391         if (qd->check_interrupt_flags) {
9392                 u8 qsfp_interrupt_status[16] = {0,};
9393
9394                 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9395                                   &qsfp_interrupt_status[0], 16) != 16) {
9396                         dd_dev_info(dd,
9397                                     "%s: Failed to read status of QSFP module\n",
9398                                     __func__);
9399                 } else {
9400                         unsigned long flags;
9401
9402                         handle_qsfp_error_conditions(
9403                                         ppd, qsfp_interrupt_status);
9404                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9405                         ppd->qsfp_info.check_interrupt_flags = 0;
9406                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9407                                                flags);
9408                 }
9409         }
9410 }
9411
9412 static void init_qsfp_int(struct hfi1_devdata *dd)
9413 {
9414         struct hfi1_pportdata *ppd = dd->pport;
9415         u64 qsfp_mask, cce_int_mask;
9416         const int qsfp1_int_smask = QSFP1_INT % 64;
9417         const int qsfp2_int_smask = QSFP2_INT % 64;
9418
9419         /*
         * Disable QSFP1 interrupts for HFI1, and QSFP2 interrupts for
         * HFI0.  Qsfp1Int and Qsfp2Int are adjacent bits in the same
         * CSR, so just one of QSFP1_INT/QSFP2_INT is needed to find the
         * index of the appropriate CSR in the CCEIntMask CSR array.
9424          */
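        /*
         * The CCE_INT_MASK CSRs form an array of 64-bit registers:
         * interrupt source n lives in register n / 64 (8 byte stride),
         * bit n % 64.
         */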
9425         cce_int_mask = read_csr(dd, CCE_INT_MASK +
9426                                 (8 * (QSFP1_INT / 64)));
9427         if (dd->hfi1_id) {
9428                 cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
9429                 write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
9430                           cce_int_mask);
9431         } else {
9432                 cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
9433                 write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
9434                           cce_int_mask);
9435         }
9436
9437         qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9438         /* Clear current status to avoid spurious interrupts */
9439         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9440                   qsfp_mask);
9441         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9442                   qsfp_mask);
9443
9444         set_qsfp_int_n(ppd, 0);
9445
9446         /* Handle active low nature of INT_N and MODPRST_N pins */
9447         if (qsfp_mod_present(ppd))
9448                 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9449         write_csr(dd,
9450                   dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9451                   qsfp_mask);
9452 }
9453
9454 /*
9455  * Do a one-time initialize of the LCB block.
9456  */
9457 static void init_lcb(struct hfi1_devdata *dd)
9458 {
9459         /* simulator does not correctly handle LCB cclk loopback, skip */
9460         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9461                 return;
9462
9463         /* the DC has been reset earlier in the driver load */
9464
9465         /* set LCB for cclk loopback on the port */
9466         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9467         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9468         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9469         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9470         write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9471         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9472         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9473 }
9474
9475 int bringup_serdes(struct hfi1_pportdata *ppd)
9476 {
9477         struct hfi1_devdata *dd = ppd->dd;
9478         u64 guid;
9479         int ret;
9480
9481         if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9482                 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9483
9484         guid = ppd->guid;
9485         if (!guid) {
9486                 if (dd->base_guid)
9487                         guid = dd->base_guid + ppd->port - 1;
9488                 ppd->guid = guid;
9489         }
9490
9491         /* Set linkinit_reason on power up per OPA spec */
9492         ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9493
9494         /* one-time init of the LCB */
9495         init_lcb(dd);
9496
9497         if (loopback) {
9498                 ret = init_loopback(dd);
9499                 if (ret < 0)
9500                         return ret;
9501         }
9502
9503         get_port_type(ppd);
9504         if (ppd->port_type == PORT_TYPE_QSFP) {
9505                 set_qsfp_int_n(ppd, 0);
9506                 wait_for_qsfp_init(ppd);
9507                 set_qsfp_int_n(ppd, 1);
9508         }
9509
9510         /*
9511          * Tune the SerDes to a ballpark setting for
9512          * optimal signal and bit error rate
9513          * Needs to be done before starting the link
9514          */
9515         tune_serdes(ppd);
9516
9517         return start_link(ppd);
9518 }
9519
9520 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9521 {
9522         struct hfi1_devdata *dd = ppd->dd;
9523
9524         /*
         * Shut down the link and keep it down.  First clear the flag
         * that says the driver wants to allow the link to be up
         * (driver_link_ready).  Then make sure the link is not
         * automatically restarted (link_enabled).  Cancel any pending
         * restart, and finally go offline.
9530          */
9531         ppd->driver_link_ready = 0;
9532         ppd->link_enabled = 0;
9533
9534         ppd->offline_disabled_reason =
9535                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
9536         set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
9537                              OPA_LINKDOWN_REASON_SMA_DISABLED);
9538         set_link_state(ppd, HLS_DN_OFFLINE);
9539
9540         /* disable the port */
9541         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9542 }
9543
9544 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9545 {
9546         struct hfi1_pportdata *ppd;
9547         int i;
9548
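        /* port data is allocated immediately after the device data (dd + 1) */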
9549         ppd = (struct hfi1_pportdata *)(dd + 1);
9550         for (i = 0; i < dd->num_pports; i++, ppd++) {
9551                 ppd->ibport_data.rvp.rc_acks = NULL;
9552                 ppd->ibport_data.rvp.rc_qacks = NULL;
9553                 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9554                 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9555                 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9556                 if (!ppd->ibport_data.rvp.rc_acks ||
9557                     !ppd->ibport_data.rvp.rc_delayed_comp ||
9558                     !ppd->ibport_data.rvp.rc_qacks)
9559                         return -ENOMEM;
9560         }
9561
9562         return 0;
9563 }
9564
9565 static const char * const pt_names[] = {
9566         "expected",
9567         "eager",
9568         "invalid"
9569 };
9570
9571 static const char *pt_name(u32 type)
9572 {
9573         return type >= ARRAY_SIZE(pt_names) ? "unknown" : pt_names[type];
9574 }
9575
9576 /*
9577  * index is the index into the receive array
9578  */
9579 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9580                   u32 type, unsigned long pa, u16 order)
9581 {
9582         u64 reg;
9583         void __iomem *base = (dd->rcvarray_wc ? dd->rcvarray_wc :
9584                               (dd->kregbase + RCV_ARRAY));
9585
9586         if (!(dd->flags & HFI1_PRESENT))
9587                 goto done;
9588
9589         if (type == PT_INVALID) {
9590                 pa = 0;
9591         } else if (type > PT_INVALID) {
9592                 dd_dev_err(dd,
9593                            "unexpected receive array type %u for index %u, not handled\n",
9594                            type, index);
9595                 goto done;
9596         }
9597
9598         hfi1_cdbg(TID, "type %s, index 0x%x, pa 0x%lx, bsize 0x%lx",
9599                   pt_name(type), index, pa, (unsigned long)order);
9600
9601 #define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
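        /*
         * An RcvArray entry packs the write enable, the buffer size
         * order, and the 4KB-aligned physical address into a single
         * 64-bit register.
         */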
9602         reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9603                 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9604                 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9605                                         << RCV_ARRAY_RT_ADDR_SHIFT;
9606         writeq(reg, base + (index * 8));
9607
9608         if (type == PT_EAGER)
9609                 /*
9610                  * Eager entries are written one-by-one so we have to push them
9611                  * after we write the entry.
9612                  */
9613                 flush_wc();
9614 done:
9615         return;
9616 }
9617
9618 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9619 {
9620         struct hfi1_devdata *dd = rcd->dd;
9621         u32 i;
9622
9623         /* this could be optimized */
9624         for (i = rcd->eager_base; i < rcd->eager_base +
9625                      rcd->egrbufs.alloced; i++)
9626                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9627
9628         for (i = rcd->expected_base;
9629                         i < rcd->expected_base + rcd->expected_count; i++)
9630                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9631 }
9632
9633 int hfi1_get_base_kinfo(struct hfi1_ctxtdata *rcd,
9634                         struct hfi1_ctxt_info *kinfo)
9635 {
9636         kinfo->runtime_flags = (HFI1_MISC_GET() << HFI1_CAP_USER_SHIFT) |
9637                 HFI1_CAP_UGET(MASK) | HFI1_CAP_KGET(K2U);
9638         return 0;
9639 }
9640
9641 struct hfi1_message_header *hfi1_get_msgheader(
9642                                 struct hfi1_devdata *dd, __le32 *rhf_addr)
9643 {
9644         u32 offset = rhf_hdrq_offset(rhf_to_cpu(rhf_addr));
9645
9646         return (struct hfi1_message_header *)
9647                 (rhf_addr - dd->rhf_offset + offset);
9648 }
9649
9650 static const char * const ib_cfg_name_strings[] = {
9651         "HFI1_IB_CFG_LIDLMC",
9652         "HFI1_IB_CFG_LWID_DG_ENB",
9653         "HFI1_IB_CFG_LWID_ENB",
9654         "HFI1_IB_CFG_LWID",
9655         "HFI1_IB_CFG_SPD_ENB",
9656         "HFI1_IB_CFG_SPD",
9657         "HFI1_IB_CFG_RXPOL_ENB",
9658         "HFI1_IB_CFG_LREV_ENB",
9659         "HFI1_IB_CFG_LINKLATENCY",
9660         "HFI1_IB_CFG_HRTBT",
9661         "HFI1_IB_CFG_OP_VLS",
9662         "HFI1_IB_CFG_VL_HIGH_CAP",
9663         "HFI1_IB_CFG_VL_LOW_CAP",
9664         "HFI1_IB_CFG_OVERRUN_THRESH",
9665         "HFI1_IB_CFG_PHYERR_THRESH",
9666         "HFI1_IB_CFG_LINKDEFAULT",
9667         "HFI1_IB_CFG_PKEYS",
9668         "HFI1_IB_CFG_MTU",
9669         "HFI1_IB_CFG_LSTATE",
9670         "HFI1_IB_CFG_VL_HIGH_LIMIT",
9671         "HFI1_IB_CFG_PMA_TICKS",
9672         "HFI1_IB_CFG_PORT"
9673 };
9674
9675 static const char *ib_cfg_name(int which)
9676 {
9677         if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9678                 return "invalid";
9679         return ib_cfg_name_strings[which];
9680 }
9681
9682 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9683 {
9684         struct hfi1_devdata *dd = ppd->dd;
9685         int val = 0;
9686
9687         switch (which) {
9688         case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9689                 val = ppd->link_width_enabled;
9690                 break;
9691         case HFI1_IB_CFG_LWID: /* currently active Link-width */
9692                 val = ppd->link_width_active;
9693                 break;
9694         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9695                 val = ppd->link_speed_enabled;
9696                 break;
9697         case HFI1_IB_CFG_SPD: /* current Link speed */
9698                 val = ppd->link_speed_active;
9699                 break;
9700
9701         case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9702         case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9703         case HFI1_IB_CFG_LINKLATENCY:
9704                 goto unimplemented;
9705
9706         case HFI1_IB_CFG_OP_VLS:
9707                 val = ppd->vls_operational;
9708                 break;
9709         case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9710                 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9711                 break;
9712         case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9713                 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9714                 break;
9715         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9716                 val = ppd->overrun_threshold;
9717                 break;
9718         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
9719                 val = ppd->phy_error_threshold;
9720                 break;
9721         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
9722                 val = dd->link_default;
9723                 break;
9724
9725         case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
9726         case HFI1_IB_CFG_PMA_TICKS:
9727         default:
9728 unimplemented:
9729                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
                        dd_dev_info(dd,
                                    "%s: which %s: not implemented\n",
                                    __func__, ib_cfg_name(which));
9735                 break;
9736         }
9737
9738         return val;
9739 }
9740
9741 /*
9742  * The largest MAD packet size.
9743  */
9744 #define MAX_MAD_PACKET 2048
9745
9746 /*
9747  * Return the maximum header bytes that can go on the _wire_
9748  * for this device. This count includes the ICRC which is
9749  * not part of the packet held in memory but it is appended
9750  * by the HW.
9751  * This is dependent on the device's receive header entry size.
9752  * HFI allows this to be set per-receive context, but the
9753  * driver presently enforces a global value.
9754  */
9755 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
9756 {
9757         /*
         * The maximum non-payload (non-MTU) bytes in LRH.PktLen are
9759          * the Receive Header Entry Size minus the PBC (or RHF) size
9760          * plus one DW for the ICRC appended by HW.
9761          *
9762          * dd->rcd[0].rcvhdrqentsize is in DW.
         * We use rcd[0] as all contexts will have the same value. Also,
9764          * the first kernel context would have been allocated by now so
9765          * we are guaranteed a valid value.
9766          */
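        /*
         * For example, a 32 DW (128 byte) header queue entry size
         * yields (32 - 2 + 1) << 2 = 124 bytes.
         */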
9767         return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
9768 }
9769
9770 /*
9771  * Set Send Length
9772  * @ppd - per port data
9773  *
9774  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
9775  * registers compare against LRH.PktLen, so use the max bytes included
9776  * in the LRH.
9777  *
9778  * This routine changes all VL values except VL15, which it maintains at
9779  * the same value.
9780  */
9781 static void set_send_length(struct hfi1_pportdata *ppd)
9782 {
9783         struct hfi1_devdata *dd = ppd->dd;
9784         u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
9785         u32 maxvlmtu = dd->vld[15].mtu;
9786         u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
9787                               & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
9788                 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
9789         int i, j;
9790         u32 thres;
9791
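        /*
         * VLs 0-3 are packed into SendLenCheck0 and VLs 4-7 into
         * SendLenCheck1 (which also carries VL15), four fields per CSR.
         * For example, a VL0 MTU of 8192 with 124 max header bytes
         * stores (8192 + 124) >> 2 = 2079 DW in the LEN_VL0 field.
         */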
9792         for (i = 0; i < ppd->vls_supported; i++) {
9793                 if (dd->vld[i].mtu > maxvlmtu)
9794                         maxvlmtu = dd->vld[i].mtu;
9795                 if (i <= 3)
9796                         len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
9797                                  & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
9798                                 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
9799                 else
9800                         len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
9801                                  & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
9802                                 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
9803         }
9804         write_csr(dd, SEND_LEN_CHECK0, len1);
9805         write_csr(dd, SEND_LEN_CHECK1, len2);
9806         /* adjust kernel credit return thresholds based on new MTUs */
9807         /* all kernel receive contexts have the same hdrqentsize */
9808         for (i = 0; i < ppd->vls_supported; i++) {
9809                 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
9810                             sc_mtu_to_threshold(dd->vld[i].sc,
9811                                                 dd->vld[i].mtu,
9812                                                 dd->rcd[0]->rcvhdrqentsize));
9813                 for (j = 0; j < INIT_SC_PER_VL; j++)
                        sc_set_cr_threshold(
                                pio_select_send_context_vl(dd, j, i),
                                thres);
9817         }
9818         thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
9819                     sc_mtu_to_threshold(dd->vld[15].sc,
9820                                         dd->vld[15].mtu,
9821                                         dd->rcd[0]->rcvhdrqentsize));
9822         sc_set_cr_threshold(dd->vld[15].sc, thres);
9823
9824         /* Adjust maximum MTU for the port in DC */
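        /*
         * MTU cap encoding: 10240 has a dedicated code point; other
         * sizes encode as ilog2(mtu / 256) + 1, e.g. 8192 -> 6 and
         * 2048 -> 4.
         */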
9825         dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
9826                 (ilog2(maxvlmtu >> 8) + 1);
9827         len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
9828         len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
9829         len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
9830                 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
9831         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
9832 }
9833
9834 static void set_lidlmc(struct hfi1_pportdata *ppd)
9835 {
9836         int i;
9837         u64 sreg = 0;
9838         struct hfi1_devdata *dd = ppd->dd;
9839         u32 mask = ~((1U << ppd->lmc) - 1);
9840         u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
9841
9842         if (dd->hfi1_snoop.mode_flag)
                dd_dev_info(dd, "Set lid/lmc while snooping\n");
9844
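        /*
         * With an LMC of n, the low n bits of the LID select a path
         * rather than the port, so they are excluded from the DLID and
         * SLID checks (e.g. lmc == 2 gives mask == 0xfffffffc).
         */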
9845         c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
9846                 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
9847         c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
9848                         << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
9849               ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
9850                         << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
9851         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
9852
9853         /*
9854          * Iterate over all the send contexts and set their SLID check
9855          */
9856         sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
9857                         SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
9858                (((ppd->lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
9859                         SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
9860
9861         for (i = 0; i < dd->chip_send_contexts; i++) {
9862                 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
9863                           i, (u32)sreg);
9864                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
9865         }
9866
9867         /* Now we have to do the same thing for the sdma engines */
9868         sdma_update_lmc(dd, mask, ppd->lid);
9869 }
9870
9871 static int wait_phy_linkstate(struct hfi1_devdata *dd, u32 state, u32 msecs)
9872 {
9873         unsigned long timeout;
9874         u32 curr_state;
9875
9876         timeout = jiffies + msecs_to_jiffies(msecs);
9877         while (1) {
9878                 curr_state = read_physical_state(dd);
9879                 if (curr_state == state)
9880                         break;
9881                 if (time_after(jiffies, timeout)) {
9882                         dd_dev_err(dd,
9883                                    "timeout waiting for phy link state 0x%x, current state is 0x%x\n",
9884                                    state, curr_state);
9885                         return -ETIMEDOUT;
9886                 }
9887                 usleep_range(1950, 2050); /* sleep 2ms-ish */
9888         }
9889
9890         return 0;
9891 }
9892
9893 /*
9894  * Helper for set_link_state().  Do not call except from that routine.
 * Expects ppd->hls_lock to be held.
9896  *
9897  * @rem_reason value to be sent to the neighbor
9898  *
9899  * LinkDownReasons only set if transition succeeds.
9900  */
9901 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
9902 {
9903         struct hfi1_devdata *dd = ppd->dd;
9904         u32 pstate, previous_state;
9905         u32 last_local_state;
9906         u32 last_remote_state;
9907         int ret;
9908         int do_transition;
9909         int do_wait;
9910
9911         previous_state = ppd->host_link_state;
9912         ppd->host_link_state = HLS_GOING_OFFLINE;
9913         pstate = read_physical_state(dd);
9914         if (pstate == PLS_OFFLINE) {
9915                 do_transition = 0;      /* in right state */
9916                 do_wait = 0;            /* ...no need to wait */
9917         } else if ((pstate & 0xff) == PLS_OFFLINE) {
9918                 do_transition = 0;      /* in an offline transient state */
9919                 do_wait = 1;            /* ...wait for it to settle */
9920         } else {
9921                 do_transition = 1;      /* need to move to offline */
9922                 do_wait = 1;            /* ...will need to wait */
9923         }
9924
9925         if (do_transition) {
9926                 ret = set_physical_link_state(dd,
9927                                               (rem_reason << 8) | PLS_OFFLINE);
9928
9929                 if (ret != HCMD_SUCCESS) {
9930                         dd_dev_err(dd,
9931                                    "Failed to transition to Offline link state, return %d\n",
9932                                    ret);
9933                         return -EINVAL;
9934                 }
9935                 if (ppd->offline_disabled_reason ==
9936                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
9937                         ppd->offline_disabled_reason =
9938                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
9939         }
9940
9941         if (do_wait) {
9942                 /* it can take a while for the link to go down */
9943                 ret = wait_phy_linkstate(dd, PLS_OFFLINE, 10000);
9944                 if (ret < 0)
9945                         return ret;
9946         }
9947
9948         /* make sure the logical state is also down */
9949         wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
9950
9951         /*
9952          * Now in charge of LCB - must be after the physical state is
         * Offline.Quiet and before host_link_state is changed.
9954          */
9955         set_host_lcb_access(dd);
9956         write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9957         ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
9958
9959         if (ppd->port_type == PORT_TYPE_QSFP &&
9960             ppd->qsfp_info.limiting_active &&
9961             qsfp_mod_present(ppd)) {
9962                 int ret;
9963
9964                 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
9965                 if (ret == 0) {
9966                         set_qsfp_tx(ppd, 0);
9967                         release_chip_resource(dd, qsfp_resource(dd));
9968                 } else {
9969                         /* not fatal, but should warn */
9970                         dd_dev_err(dd,
9971                                    "Unable to acquire lock to turn off QSFP TX\n");
9972                 }
9973         }
9974
9975         /*
9976          * The LNI has a mandatory wait time after the physical state
9977          * moves to Offline.Quiet.  The wait time may be different
9978          * depending on how the link went down.  The 8051 firmware
9979          * will observe the needed wait time and only move to ready
9980          * when that is completed.  The largest of the quiet timeouts
9981          * is 6s, so wait that long and then at least 0.5s more for
9982          * other transitions, and another 0.5s for a buffer.
9983          */
9984         ret = wait_fm_ready(dd, 7000);
9985         if (ret) {
9986                 dd_dev_err(dd,
9987                            "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
9988                 /* state is really offline, so make it so */
9989                 ppd->host_link_state = HLS_DN_OFFLINE;
9990                 return ret;
9991         }
9992
9993         /*
9994          * The state is now offline and the 8051 is ready to accept host
9995          * requests.
9996          *      - change our state
9997          *      - notify others if we were previously in a linkup state
9998          */
9999         ppd->host_link_state = HLS_DN_OFFLINE;
10000         if (previous_state & HLS_UP) {
10001                 /* went down while link was up */
10002                 handle_linkup_change(dd, 0);
        } else if (previous_state &
                   (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10005                 /* went down while attempting link up */
10006                 /* byte 1 of last_*_state is the failure reason */
10007                 read_last_local_state(dd, &last_local_state);
10008                 read_last_remote_state(dd, &last_remote_state);
10009                 dd_dev_err(dd,
10010                            "LNI failure last states: local 0x%08x, remote 0x%08x\n",
10011                            last_local_state, last_remote_state);
10012         }
10013
10014         /* the active link width (downgrade) is 0 on link down */
10015         ppd->link_width_active = 0;
10016         ppd->link_width_downgrade_tx_active = 0;
10017         ppd->link_width_downgrade_rx_active = 0;
10018         ppd->current_egress_rate = 0;
10019         return 0;
10020 }
10021
10022 /* return the link state name */
10023 static const char *link_state_name(u32 state)
10024 {
10025         const char *name;
10026         int n = ilog2(state);
10027         static const char * const names[] = {
10028                 [__HLS_UP_INIT_BP]       = "INIT",
10029                 [__HLS_UP_ARMED_BP]      = "ARMED",
10030                 [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
10031                 [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
10032                 [__HLS_DN_POLL_BP]       = "POLL",
10033                 [__HLS_DN_DISABLE_BP]    = "DISABLE",
10034                 [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
10035                 [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
10036                 [__HLS_GOING_UP_BP]      = "GOING_UP",
10037                 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10038                 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10039         };
10040
10041         name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10042         return name ? name : "unknown";
10043 }
10044
10045 /* return the link state reason name */
10046 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10047 {
10048         if (state == HLS_UP_INIT) {
10049                 switch (ppd->linkinit_reason) {
10050                 case OPA_LINKINIT_REASON_LINKUP:
10051                         return "(LINKUP)";
10052                 case OPA_LINKINIT_REASON_FLAPPING:
10053                         return "(FLAPPING)";
10054                 case OPA_LINKINIT_OUTSIDE_POLICY:
10055                         return "(OUTSIDE_POLICY)";
10056                 case OPA_LINKINIT_QUARANTINED:
10057                         return "(QUARANTINED)";
10058                 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10059                         return "(INSUFIC_CAPABILITY)";
10060                 default:
10061                         break;
10062                 }
10063         }
10064         return "";
10065 }
10066
10067 /*
10068  * driver_physical_state - convert the driver's notion of a port's
10069  * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10070  * Return -1 (converted to a u32) to indicate error.
10071  */
10072 u32 driver_physical_state(struct hfi1_pportdata *ppd)
10073 {
10074         switch (ppd->host_link_state) {
10075         case HLS_UP_INIT:
10076         case HLS_UP_ARMED:
10077         case HLS_UP_ACTIVE:
10078                 return IB_PORTPHYSSTATE_LINKUP;
        case HLS_DN_POLL:
        case HLS_VERIFY_CAP:
        case HLS_GOING_UP:
                return IB_PORTPHYSSTATE_POLLING;
        case HLS_DN_DISABLE:
                return IB_PORTPHYSSTATE_DISABLED;
        case HLS_DN_OFFLINE:
        case HLS_GOING_OFFLINE:
        case HLS_LINK_COOLDOWN:
                return OPA_PORTPHYSSTATE_OFFLINE;
10093         case HLS_DN_DOWNDEF:
10094         default:
10095                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10096                            ppd->host_link_state);
                return -1;
10098         }
10099 }
10100
10101 /*
10102  * driver_logical_state - convert the driver's notion of a port's
10103  * state (an HLS_*) into a logical state (a IB_PORT_*). Return -1
10104  * (converted to a u32) to indicate error.
10105  */
10106 u32 driver_logical_state(struct hfi1_pportdata *ppd)
10107 {
10108         if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10109                 return IB_PORT_DOWN;
10110
10111         switch (ppd->host_link_state & HLS_UP) {
10112         case HLS_UP_INIT:
10113                 return IB_PORT_INIT;
10114         case HLS_UP_ARMED:
10115                 return IB_PORT_ARMED;
10116         case HLS_UP_ACTIVE:
10117                 return IB_PORT_ACTIVE;
10118         default:
10119                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10120                            ppd->host_link_state);
                return -1;
10122         }
10123 }
10124
10125 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10126                           u8 neigh_reason, u8 rem_reason)
10127 {
10128         if (ppd->local_link_down_reason.latest == 0 &&
10129             ppd->neigh_link_down_reason.latest == 0) {
10130                 ppd->local_link_down_reason.latest = lcl_reason;
10131                 ppd->neigh_link_down_reason.latest = neigh_reason;
10132                 ppd->remote_link_down_reason = rem_reason;
10133         }
10134 }
10135
10136 /*
10137  * Change the physical and/or logical link state.
10138  *
10139  * Do not call this routine while inside an interrupt.  It contains
10140  * calls to routines that can take multiple seconds to finish.
10141  *
10142  * Returns 0 on success, -errno on failure.
10143  */
10144 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10145 {
10146         struct hfi1_devdata *dd = ppd->dd;
10147         struct ib_event event = {.device = NULL};
10148         int ret1, ret = 0;
10149         int orig_new_state, poll_bounce;
10150
10151         mutex_lock(&ppd->hls_lock);
10152
10153         orig_new_state = state;
10154         if (state == HLS_DN_DOWNDEF)
10155                 state = dd->link_default;
10156
10157         /* interpret poll -> poll as a link bounce */
10158         poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10159                       state == HLS_DN_POLL;
10160
10161         dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10162                     link_state_name(ppd->host_link_state),
10163                     link_state_name(orig_new_state),
10164                     poll_bounce ? "(bounce) " : "",
10165                     link_state_reason_name(ppd, state));
10166
10167         /*
10168          * If we're going to a (HLS_*) link state that implies the logical
10169          * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10170          * reset is_sm_config_started to 0.
10171          */
10172         if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10173                 ppd->is_sm_config_started = 0;
10174
10175         /*
         * Do nothing if the states match.  Let a poll-to-poll link bounce
10177          * go through.
10178          */
10179         if (ppd->host_link_state == state && !poll_bounce)
10180                 goto done;
10181
10182         switch (state) {
10183         case HLS_UP_INIT:
10184                 if (ppd->host_link_state == HLS_DN_POLL &&
10185                     (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10186                         /*
10187                          * Quick link up jumps from polling to here.
10188                          *
10189                          * Whether in normal or loopback mode, the
10190                          * simulator jumps from polling to link up.
10191                          * Accept that here.
10192                          */
10193                         /* OK */
10194                 } else if (ppd->host_link_state != HLS_GOING_UP) {
10195                         goto unexpected;
10196                 }
10197
10198                 ppd->host_link_state = HLS_UP_INIT;
10199                 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10200                 if (ret) {
10201                         /* logical state didn't change, stay at going_up */
10202                         ppd->host_link_state = HLS_GOING_UP;
10203                         dd_dev_err(dd,
10204                                    "%s: logical state did not change to INIT\n",
10205                                    __func__);
10206                 } else {
10207                         /* clear old transient LINKINIT_REASON code */
10208                         if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10209                                 ppd->linkinit_reason =
10210                                         OPA_LINKINIT_REASON_LINKUP;
10211
10212                         /* enable the port */
10213                         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10214
10215                         handle_linkup_change(dd, 1);
10216                 }
10217                 break;
10218         case HLS_UP_ARMED:
10219                 if (ppd->host_link_state != HLS_UP_INIT)
10220                         goto unexpected;
10221
10222                 ppd->host_link_state = HLS_UP_ARMED;
10223                 set_logical_state(dd, LSTATE_ARMED);
10224                 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10225                 if (ret) {
10226                         /* logical state didn't change, stay at init */
10227                         ppd->host_link_state = HLS_UP_INIT;
10228                         dd_dev_err(dd,
10229                                    "%s: logical state did not change to ARMED\n",
10230                                    __func__);
10231                 }
10232                 /*
10233                  * The simulator does not currently implement SMA messages,
10234                  * so neighbor_normal is not set.  Set it here when we first
10235                  * move to Armed.
10236                  */
10237                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10238                         ppd->neighbor_normal = 1;
10239                 break;
10240         case HLS_UP_ACTIVE:
10241                 if (ppd->host_link_state != HLS_UP_ARMED)
10242                         goto unexpected;
10243
10244                 ppd->host_link_state = HLS_UP_ACTIVE;
10245                 set_logical_state(dd, LSTATE_ACTIVE);
10246                 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10247                 if (ret) {
10248                         /* logical state didn't change, stay at armed */
10249                         ppd->host_link_state = HLS_UP_ARMED;
10250                         dd_dev_err(dd,
10251                                    "%s: logical state did not change to ACTIVE\n",
10252                                    __func__);
10253                 } else {
10254                         /* tell all engines to go running */
10255                         sdma_all_running(dd);
10256
                        /* Signal the IB layer that the port has gone active */
10258                         event.device = &dd->verbs_dev.rdi.ibdev;
10259                         event.element.port_num = ppd->port;
10260                         event.event = IB_EVENT_PORT_ACTIVE;
10261                 }
10262                 break;
10263         case HLS_DN_POLL:
10264                 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10265                      ppd->host_link_state == HLS_DN_OFFLINE) &&
10266                     dd->dc_shutdown)
10267                         dc_start(dd);
10268                 /* Hand LED control to the DC */
10269                 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10270
10271                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10272                         u8 tmp = ppd->link_enabled;
10273
10274                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10275                         if (ret) {
10276                                 ppd->link_enabled = tmp;
10277                                 break;
10278                         }
10279                         ppd->remote_link_down_reason = 0;
10280
10281                         if (ppd->driver_link_ready)
10282                                 ppd->link_enabled = 1;
10283                 }
10284
10285                 set_all_slowpath(ppd->dd);
10286                 ret = set_local_link_attributes(ppd);
10287                 if (ret)
10288                         break;
10289
10290                 ppd->port_error_action = 0;
10291                 ppd->host_link_state = HLS_DN_POLL;
10292
10293                 if (quick_linkup) {
10294                         /* quick linkup does not go into polling */
10295                         ret = do_quick_linkup(dd);
10296                 } else {
10297                         ret1 = set_physical_link_state(dd, PLS_POLLING);
10298                         if (ret1 != HCMD_SUCCESS) {
10299                                 dd_dev_err(dd,
10300                                            "Failed to transition to Polling link state, return 0x%x\n",
10301                                            ret1);
10302                                 ret = -EINVAL;
10303                         }
10304                 }
10305                 ppd->offline_disabled_reason =
10306                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10307                 /*
10308                  * If an error occurred above, go back to offline.  The
10309                  * caller may reschedule another attempt.
10310                  */
10311                 if (ret)
10312                         goto_offline(ppd, 0);
10313                 break;
10314         case HLS_DN_DISABLE:
10315                 /* link is disabled */
10316                 ppd->link_enabled = 0;
10317
10318                 /* allow any state to transition to disabled */
10319
10320                 /* must transition to offline first */
10321                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10322                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10323                         if (ret)
10324                                 break;
10325                         ppd->remote_link_down_reason = 0;
10326                 }
10327
10328                 ret1 = set_physical_link_state(dd, PLS_DISABLED);
10329                 if (ret1 != HCMD_SUCCESS) {
10330                         dd_dev_err(dd,
10331                                    "Failed to transition to Disabled link state, return 0x%x\n",
10332                                    ret1);
10333                         ret = -EINVAL;
10334                         break;
10335                 }
10336                 ppd->host_link_state = HLS_DN_DISABLE;
10337                 dc_shutdown(dd);
10338                 break;
10339         case HLS_DN_OFFLINE:
10340                 if (ppd->host_link_state == HLS_DN_DISABLE)
10341                         dc_start(dd);
10342
10343                 /* allow any state to transition to offline */
10344                 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10345                 if (!ret)
10346                         ppd->remote_link_down_reason = 0;
10347                 break;
10348         case HLS_VERIFY_CAP:
10349                 if (ppd->host_link_state != HLS_DN_POLL)
10350                         goto unexpected;
10351                 ppd->host_link_state = HLS_VERIFY_CAP;
10352                 break;
10353         case HLS_GOING_UP:
10354                 if (ppd->host_link_state != HLS_VERIFY_CAP)
10355                         goto unexpected;
10356
10357                 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10358                 if (ret1 != HCMD_SUCCESS) {
10359                         dd_dev_err(dd,
10360                                    "Failed to transition to link up state, return 0x%x\n",
10361                                    ret1);
10362                         ret = -EINVAL;
10363                         break;
10364                 }
10365                 ppd->host_link_state = HLS_GOING_UP;
10366                 break;
10367
10368         case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10369         case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10370         default:
10371                 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10372                             __func__, state);
10373                 ret = -EINVAL;
10374                 break;
10375         }
10376
10377         goto done;
10378
10379 unexpected:
10380         dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10381                    __func__, link_state_name(ppd->host_link_state),
10382                    link_state_name(state));
10383         ret = -EINVAL;
10384
10385 done:
10386         mutex_unlock(&ppd->hls_lock);
10387
10388         if (event.device)
10389                 ib_dispatch_event(&event);
10390
10391         return ret;
10392 }
10393
10394 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10395 {
10396         u64 reg;
10397         int ret = 0;
10398
10399         switch (which) {
10400         case HFI1_IB_CFG_LIDLMC:
10401                 set_lidlmc(ppd);
10402                 break;
10403         case HFI1_IB_CFG_VL_HIGH_LIMIT:
10404                 /*
10405                  * The VL Arbitrator high limit is sent in units of 4k
10406                  * bytes, while HFI stores it in units of 64 bytes.
10407                  */
10408                 val *= 4096 / 64;
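                /* e.g., an incoming val of 2 (2 * 4 KB) becomes 2 * 64 = 128 */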
10409                 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10410                         << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10411                 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10412                 break;
10413         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10414                 /* HFI only supports POLL as the default link down state */
10415                 if (val != HLS_DN_POLL)
10416                         ret = -EINVAL;
10417                 break;
10418         case HFI1_IB_CFG_OP_VLS:
10419                 if (ppd->vls_operational != val) {
10420                         ppd->vls_operational = val;
10421                         if (!ppd->port)
10422                                 ret = -EINVAL;
10423                 }
10424                 break;
10425         /*
10426          * For link width, link width downgrade, and speed enable, always AND
10427          * the setting with what is actually supported.  This has two benefits.
10428          * First, enabled can't have unsupported values, no matter what the
10429          * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10430          * "fill in with your supported value" have all the bits in the
10431          * field set, so simply ANDing with supported has the desired result.
10432          */
10433         case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10434                 ppd->link_width_enabled = val & ppd->link_width_supported;
10435                 break;
10436         case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10437                 ppd->link_width_downgrade_enabled =
10438                                 val & ppd->link_width_downgrade_supported;
10439                 break;
10440         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10441                 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10442                 break;
10443         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10444                 /*
10445                  * HFI does not follow IB specs; save this value
10446                  * so we can report it if asked.
10447                  */
10448                 ppd->overrun_threshold = val;
10449                 break;
10450         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10451                 /*
10452                  * HFI does not follow IB specs; save this value
10453                  * so we can report it if asked.
10454                  */
10455                 ppd->phy_error_threshold = val;
10456                 break;
10457
10458         case HFI1_IB_CFG_MTU:
10459                 set_send_length(ppd);
10460                 break;
10461
10462         case HFI1_IB_CFG_PKEYS:
10463                 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10464                         set_partition_keys(ppd);
10465                 break;
10466
10467         default:
10468                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10469                         dd_dev_info(ppd->dd,
10470                                     "%s: which %s, val 0x%x: not implemented\n",
10471                                     __func__, ib_cfg_name(which), val);
10472                 break;
10473         }
10474         return ret;
10475 }
10476
10477 /* begin functions related to vl arbitration table caching */
10478 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
10479 {
10480         int i;
10481
10482         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10483                         VL_ARB_LOW_PRIO_TABLE_SIZE);
10484         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
10485                         VL_ARB_HIGH_PRIO_TABLE_SIZE);
10486
10487         /*
10488          * Note that we always return values directly from the
10489          * 'vl_arb_cache' (and do no CSR reads) in response to a
10490          * 'Get(VLArbTable)'. This is obviously correct after a
10491          * 'Set(VLArbTable)', since the cache will then be up to
10492                  * date. But it's also correct prior to any 'Set(VLArbTable)',
10493                  * since then both the cache and the relevant h/w registers
10494                  * will be zeroed.
10495          */
10496
10497         for (i = 0; i < MAX_PRIO_TABLE; i++)
10498                 spin_lock_init(&ppd->vl_arb_cache[i].lock);
10499 }
10500
10501 /*
10502  * vl_arb_lock_cache
10503  *
10504  * All other vl_arb_* functions should be called only after locking
10505  * the cache.
10506  */
10507 static inline struct vl_arb_cache *
10508 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
10509 {
10510         if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
10511                 return NULL;
10512         spin_lock(&ppd->vl_arb_cache[idx].lock);
10513         return &ppd->vl_arb_cache[idx];
10514 }
10515
10516 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
10517 {
10518         spin_unlock(&ppd->vl_arb_cache[idx].lock);
10519 }
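
/*
 * Typical locked-access pattern (sketch; fm_get_table() below does
 * exactly this):
 *
 *	vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
 *	vl_arb_get_cache(vlc, t);
 *	vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
 */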
10520
10521 static void vl_arb_get_cache(struct vl_arb_cache *cache,
10522                              struct ib_vl_weight_elem *vl)
10523 {
10524         memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
10525 }
10526
10527 static void vl_arb_set_cache(struct vl_arb_cache *cache,
10528                              struct ib_vl_weight_elem *vl)
10529 {
10530         memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10531 }
10532
10533 static int vl_arb_match_cache(struct vl_arb_cache *cache,
10534                               struct ib_vl_weight_elem *vl)
10535 {
10536         return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
10537 }
10538
10539 /* end functions related to vl arbitration table caching */
10540
10541 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
10542                           u32 size, struct ib_vl_weight_elem *vl)
10543 {
10544         struct hfi1_devdata *dd = ppd->dd;
10545         u64 reg;
10546         unsigned int i, is_up = 0;
10547         int drain, ret = 0;
10548
10549         mutex_lock(&ppd->hls_lock);
10550
10551         if (ppd->host_link_state & HLS_UP)
10552                 is_up = 1;
10553
10554         drain = !is_ax(dd) && is_up;
10555
10556         if (drain)
10557                 /*
10558                  * Before adjusting VL arbitration weights, empty per-VL
10559                  * FIFOs, otherwise a packet whose VL weight is being
10560                  * set to 0 could get stuck in a FIFO with no chance to
10561                  * egress.
10562                  */
10563                 ret = stop_drain_data_vls(dd);
10564
10565         if (ret) {
10566                 dd_dev_err(
10567                         dd,
10568                         "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
10569                         __func__);
10570                 goto err;
10571         }
10572
10573         for (i = 0; i < size; i++, vl++) {
10574                 /*
10575                  * NOTE: The low priority shift and mask are used here, but
10576                  * they are the same for both the low and high registers.
10577                  */
10578                 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
10579                                 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
10580                       | (((u64)vl->weight
10581                                 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
10582                                 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
10583                 write_csr(dd, target + (i * 8), reg);
10584         }
10585         pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
10586
10587         if (drain)
10588                 open_fill_data_vls(dd); /* reopen all VLs */
10589
10590 err:
10591         mutex_unlock(&ppd->hls_lock);
10592
10593         return ret;
10594 }
10595
10596 /*
10597  * Read one credit merge VL register.
10598  */
10599 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
10600                            struct vl_limit *vll)
10601 {
10602         u64 reg = read_csr(dd, csr);
10603
10604         vll->dedicated = cpu_to_be16(
10605                 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
10606                 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
10607         vll->shared = cpu_to_be16(
10608                 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
10609                 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
10610 }
10611
10612 /*
10613  * Read the current credit merge limits.
10614  */
10615 static int get_buffer_control(struct hfi1_devdata *dd,
10616                               struct buffer_control *bc, u16 *overall_limit)
10617 {
10618         u64 reg;
10619         int i;
10620
10621         /* not all entries are filled in */
10622         memset(bc, 0, sizeof(*bc));
10623
10624         /* OPA and HFI have a 1-1 mapping */
10625         for (i = 0; i < TXE_NUM_DATA_VL; i++)
10626                 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
10627
10628         /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
10629         read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
10630
10631         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10632         bc->overall_shared_limit = cpu_to_be16(
10633                 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
10634                 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
10635         if (overall_limit)
10636                 *overall_limit = (reg
10637                         >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
10638                         & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
10639         return sizeof(struct buffer_control);
10640 }
10641
10642 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10643 {
10644         u64 reg;
10645         int i;
10646
10647         /* each register contains 16 SC->VLnt mappings, 4 bits each */
10648         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
10649         for (i = 0; i < sizeof(u64); i++) {
10650                 u8 byte = *(((u8 *)&reg) + i);
10651
10652                 dp->vlnt[2 * i] = byte & 0xf;
10653                 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
10654         }
10655
10656         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
10657         for (i = 0; i < sizeof(u64); i++) {
10658                 u8 byte = *(((u8 *)&reg) + i);
10659
10660                 dp->vlnt[16 + (2 * i)] = byte & 0xf;
10661                 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
10662         }
10663         return sizeof(struct sc2vlnt);
10664 }
10665
10666 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
10667                               struct ib_vl_weight_elem *vl)
10668 {
10669         unsigned int i;
10670
10671         for (i = 0; i < nelems; i++, vl++) {
10672                 vl->vl = 0xf;
10673                 vl->weight = 0;
10674         }
10675 }
10676
10677 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
10678 {
10679         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
10680                   DC_SC_VL_VAL(15_0,
10681                                0, dp->vlnt[0] & 0xf,
10682                                1, dp->vlnt[1] & 0xf,
10683                                2, dp->vlnt[2] & 0xf,
10684                                3, dp->vlnt[3] & 0xf,
10685                                4, dp->vlnt[4] & 0xf,
10686                                5, dp->vlnt[5] & 0xf,
10687                                6, dp->vlnt[6] & 0xf,
10688                                7, dp->vlnt[7] & 0xf,
10689                                8, dp->vlnt[8] & 0xf,
10690                                9, dp->vlnt[9] & 0xf,
10691                                10, dp->vlnt[10] & 0xf,
10692                                11, dp->vlnt[11] & 0xf,
10693                                12, dp->vlnt[12] & 0xf,
10694                                13, dp->vlnt[13] & 0xf,
10695                                14, dp->vlnt[14] & 0xf,
10696                                15, dp->vlnt[15] & 0xf));
10697         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
10698                   DC_SC_VL_VAL(31_16,
10699                                16, dp->vlnt[16] & 0xf,
10700                                17, dp->vlnt[17] & 0xf,
10701                                18, dp->vlnt[18] & 0xf,
10702                                19, dp->vlnt[19] & 0xf,
10703                                20, dp->vlnt[20] & 0xf,
10704                                21, dp->vlnt[21] & 0xf,
10705                                22, dp->vlnt[22] & 0xf,
10706                                23, dp->vlnt[23] & 0xf,
10707                                24, dp->vlnt[24] & 0xf,
10708                                25, dp->vlnt[25] & 0xf,
10709                                26, dp->vlnt[26] & 0xf,
10710                                27, dp->vlnt[27] & 0xf,
10711                                28, dp->vlnt[28] & 0xf,
10712                                29, dp->vlnt[29] & 0xf,
10713                                30, dp->vlnt[30] & 0xf,
10714                                31, dp->vlnt[31] & 0xf));
10715 }
10716
10717 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
10718                         u16 limit)
10719 {
10720         if (limit != 0)
10721                 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
10722                             what, (int)limit, idx);
10723 }
10724
10725 /* change only the shared limit portion of SendCmGlobalCredit */
10726 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
10727 {
10728         u64 reg;
10729
10730         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10731         reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
10732         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
10733         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10734 }
10735
10736 /* change only the total credit limit portion of SendCmGlobalCredit */
10737 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
10738 {
10739         u64 reg;
10740
10741         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
10742         reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
10743         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
10744         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
10745 }
10746
10747 /* set the given per-VL shared limit */
10748 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
10749 {
10750         u64 reg;
10751         u32 addr;
10752
10753         if (vl < TXE_NUM_DATA_VL)
10754                 addr = SEND_CM_CREDIT_VL + (8 * vl);
10755         else
10756                 addr = SEND_CM_CREDIT_VL15;
10757
10758         reg = read_csr(dd, addr);
10759         reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
10760         reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
10761         write_csr(dd, addr, reg);
10762 }
10763
10764 /* set the given per-VL dedicated limit */
10765 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
10766 {
10767         u64 reg;
10768         u32 addr;
10769
10770         if (vl < TXE_NUM_DATA_VL)
10771                 addr = SEND_CM_CREDIT_VL + (8 * vl);
10772         else
10773                 addr = SEND_CM_CREDIT_VL15;
10774
10775         reg = read_csr(dd, addr);
10776         reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
10777         reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
10778         write_csr(dd, addr, reg);
10779 }
10780
10781 /* spin until the given per-VL status mask bits clear */
10782 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
10783                                      const char *which)
10784 {
10785         unsigned long timeout;
10786         u64 reg;
10787
10788         timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
10789         while (1) {
10790                 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
10791
10792                 if (reg == 0)
10793                         return; /* success */
10794                 if (time_after(jiffies, timeout))
10795                         break;          /* timed out */
10796                 udelay(1);
10797         }
10798
10799         dd_dev_err(dd,
10800                    "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
10801                    which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
10802         /*
10803          * If this occurs, it is likely there was a credit loss on the link.
10804          * The only recovery from that is a link bounce.
10805          */
10806         dd_dev_err(dd,
10807                    "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
10808 }
10809
10810 /*
10811  * The number of credits on the VLs may be changed while everything
10812  * is "live", but the following algorithm must be followed due to
10813  * how the hardware is actually implemented.  In particular,
10814  * Return_Credit_Status[] is the only correct status check.
10815  *
10816  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
10817  *     set Global_Shared_Credit_Limit = 0
10818  *     use_all_vl = 1
10819  * mask0 = all VLs that are changing either dedicated or shared limits
10820  * set Shared_Limit[mask0] = 0
10821  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
10822  * if (changing any dedicated limit)
10823  *     mask1 = all VLs that are lowering dedicated limits
10824  *     lower Dedicated_Limit[mask1]
10825  *     spin until Return_Credit_Status[mask1] == 0
10826  *     raise Dedicated_Limits
10827  * raise Shared_Limits
10828  * raise Global_Shared_Credit_Limit
10829  *
10830  * lower = if the new limit is lower, set the limit to the new value
10831  * raise = if the new limit is higher than the current value (may be changed
10832  *      earlier in the algorithm), set the limit to the new value
10833  */
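/*
 * Illustrative walk-through (hypothetical values): if VL0's dedicated
 * limit drops from 100 to 50 and VL1's dedicated limit rises, mask0
 * covers VL0 and VL1: both shared limits are zeroed and we spin on
 * their Return_Credit_Status.  mask1 covers only VL0: its dedicated
 * limit is lowered and we spin again.  Then VL1's dedicated limit is
 * raised, and the zeroed shared limits are restored ("raised"), since
 * the new values now exceed the zeroed current ones.
 */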
10834 int set_buffer_control(struct hfi1_pportdata *ppd,
10835                        struct buffer_control *new_bc)
10836 {
10837         struct hfi1_devdata *dd = ppd->dd;
10838         u64 changing_mask, ld_mask, stat_mask;
10839         int change_count;
10840         int i, use_all_mask;
10841         int this_shared_changing;
10842         int vl_count = 0, ret;
10843         /*
10844          * A0 adds the variable any_shared_limit_changing (below) to the
10845          * algorithm above.  It can be dropped when A0 support is removed.
10846          */
10847         int any_shared_limit_changing;
10848         struct buffer_control cur_bc;
10849         u8 changing[OPA_MAX_VLS];
10850         u8 lowering_dedicated[OPA_MAX_VLS];
10851         u16 cur_total;
10852         u32 new_total = 0;
10853         const u64 all_mask =
10854         SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
10855          | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
10856          | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
10857          | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
10858          | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
10859          | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
10860          | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
10861          | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
10862          | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
10863
10864 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
10865 #define NUM_USABLE_VLS 16       /* look at VL15 and less */
10866
10867         /* find the new total credits, do sanity check on unused VLs */
10868         for (i = 0; i < OPA_MAX_VLS; i++) {
10869                 if (valid_vl(i)) {
10870                         new_total += be16_to_cpu(new_bc->vl[i].dedicated);
10871                         continue;
10872                 }
10873                 nonzero_msg(dd, i, "dedicated",
10874                             be16_to_cpu(new_bc->vl[i].dedicated));
10875                 nonzero_msg(dd, i, "shared",
10876                             be16_to_cpu(new_bc->vl[i].shared));
10877                 new_bc->vl[i].dedicated = 0;
10878                 new_bc->vl[i].shared = 0;
10879         }
10880         new_total += be16_to_cpu(new_bc->overall_shared_limit);
10881
10882         /* fetch the current values */
10883         get_buffer_control(dd, &cur_bc, &cur_total);
10884
10885         /*
10886          * Create the masks we will use.
10887          */
10888         memset(changing, 0, sizeof(changing));
10889         memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
10890         /*
10891          * NOTE: Assumes that the individual VL bits are adjacent and in
10892          * increasing order
10893          */
10894         stat_mask =
10895                 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
10896         changing_mask = 0;
10897         ld_mask = 0;
10898         change_count = 0;
10899         any_shared_limit_changing = 0;
10900         for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
10901                 if (!valid_vl(i))
10902                         continue;
10903                 this_shared_changing = new_bc->vl[i].shared
10904                                                 != cur_bc.vl[i].shared;
10905                 if (this_shared_changing)
10906                         any_shared_limit_changing = 1;
10907                 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
10908                     this_shared_changing) {
10909                         changing[i] = 1;
10910                         changing_mask |= stat_mask;
10911                         change_count++;
10912                 }
10913                 if (be16_to_cpu(new_bc->vl[i].dedicated) <
10914                                         be16_to_cpu(cur_bc.vl[i].dedicated)) {
10915                         lowering_dedicated[i] = 1;
10916                         ld_mask |= stat_mask;
10917                 }
10918         }
10919
10920         /* bracket the credit change with a total adjustment */
10921         if (new_total > cur_total)
10922                 set_global_limit(dd, new_total);
10923
10924         /*
10925          * Start the credit change algorithm.
10926          */
10927         use_all_mask = 0;
10928         if ((be16_to_cpu(new_bc->overall_shared_limit) <
10929              be16_to_cpu(cur_bc.overall_shared_limit)) ||
10930             (is_ax(dd) && any_shared_limit_changing)) {
10931                 set_global_shared(dd, 0);
10932                 cur_bc.overall_shared_limit = 0;
10933                 use_all_mask = 1;
10934         }
10935
10936         for (i = 0; i < NUM_USABLE_VLS; i++) {
10937                 if (!valid_vl(i))
10938                         continue;
10939
10940                 if (changing[i]) {
10941                         set_vl_shared(dd, i, 0);
10942                         cur_bc.vl[i].shared = 0;
10943                 }
10944         }
10945
10946         wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
10947                                  "shared");
10948
10949         if (change_count > 0) {
10950                 for (i = 0; i < NUM_USABLE_VLS; i++) {
10951                         if (!valid_vl(i))
10952                                 continue;
10953
10954                         if (lowering_dedicated[i]) {
10955                                 set_vl_dedicated(dd, i,
10956                                                  be16_to_cpu(new_bc->
10957                                                              vl[i].dedicated));
10958                                 cur_bc.vl[i].dedicated =
10959                                                 new_bc->vl[i].dedicated;
10960                         }
10961                 }
10962
10963                 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
10964
10965                 /* now raise all dedicated that are going up */
10966                 for (i = 0; i < NUM_USABLE_VLS; i++) {
10967                         if (!valid_vl(i))
10968                                 continue;
10969
10970                         if (be16_to_cpu(new_bc->vl[i].dedicated) >
10971                                         be16_to_cpu(cur_bc.vl[i].dedicated))
10972                                 set_vl_dedicated(dd, i,
10973                                                  be16_to_cpu(new_bc->
10974                                                              vl[i].dedicated));
10975                 }
10976         }
10977
10978         /* next raise all shared that are going up */
10979         for (i = 0; i < NUM_USABLE_VLS; i++) {
10980                 if (!valid_vl(i))
10981                         continue;
10982
10983                 if (be16_to_cpu(new_bc->vl[i].shared) >
10984                                 be16_to_cpu(cur_bc.vl[i].shared))
10985                         set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
10986         }
10987
10988         /* finally raise the global shared */
10989         if (be16_to_cpu(new_bc->overall_shared_limit) >
10990             be16_to_cpu(cur_bc.overall_shared_limit))
10991                 set_global_shared(dd,
10992                                   be16_to_cpu(new_bc->overall_shared_limit));
10993
10994         /* bracket the credit change with a total adjustment */
10995         if (new_total < cur_total)
10996                 set_global_limit(dd, new_total);
10997
10998         /*
10999          * Determine the actual number of operational VLs using the number of
11000          * dedicated and shared credits for each VL.
11001          */
11002         if (change_count > 0) {
11003                 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11004                         if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11005                             be16_to_cpu(new_bc->vl[i].shared) > 0)
11006                                 vl_count++;
11007                 ppd->actual_vls_operational = vl_count;
11008                 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11009                                     ppd->actual_vls_operational :
11010                                     ppd->vls_operational,
11011                                     NULL);
11012                 if (ret == 0)
11013                         ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11014                                            ppd->actual_vls_operational :
11015                                            ppd->vls_operational, NULL);
11016                 if (ret)
11017                         return ret;
11018         }
11019         return 0;
11020 }
11021
11022 /*
11023  * Read the given fabric manager table. Return the size of the
11024  * table (in bytes) on success, and a negative error code on
11025  * failure.
11026  */
11027 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11029 {
11030         int size;
11031         struct vl_arb_cache *vlc;
11032
11033         switch (which) {
11034         case FM_TBL_VL_HIGH_ARB:
11035                 size = 256;
11036                 /*
11037                  * OPA specifies 128 elements (of 2 bytes each), though
11038                  * HFI supports only 16 elements in h/w.
11039                  */
11040                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11041                 vl_arb_get_cache(vlc, t);
11042                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11043                 break;
11044         case FM_TBL_VL_LOW_ARB:
11045                 size = 256;
11046                 /*
11047                  * OPA specifies 128 elements (of 2 bytes each), though
11048                  * HFI supports only 16 elements in h/w.
11049                  */
11050                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11051                 vl_arb_get_cache(vlc, t);
11052                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11053                 break;
11054         case FM_TBL_BUFFER_CONTROL:
11055                 size = get_buffer_control(ppd->dd, t, NULL);
11056                 break;
11057         case FM_TBL_SC2VLNT:
11058                 size = get_sc2vlnt(ppd->dd, t);
11059                 break;
11060         case FM_TBL_VL_PREEMPT_ELEMS:
11061                 size = 256;
11062                 /* OPA specifies 128 elements, of 2 bytes each */
11063                 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11064                 break;
11065         case FM_TBL_VL_PREEMPT_MATRIX:
11066                 size = 256;
11067                 /*
11068                  * OPA specifies that this is the same size as the VL
11069                  * arbitration tables (i.e., 256 bytes).
11070                  */
11071                 break;
11072         default:
11073                 return -EINVAL;
11074         }
11075         return size;
11076 }
11077
11078 /*
11079  * Write the given fabric manager table.
11080  */
11081 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11082 {
11083         int ret = 0;
11084         struct vl_arb_cache *vlc;
11085
11086         switch (which) {
11087         case FM_TBL_VL_HIGH_ARB:
11088                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11089                 if (vl_arb_match_cache(vlc, t)) {
11090                         vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11091                         break;
11092                 }
11093                 vl_arb_set_cache(vlc, t);
11094                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11095                 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11096                                      VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11097                 break;
11098         case FM_TBL_VL_LOW_ARB:
11099                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11100                 if (vl_arb_match_cache(vlc, t)) {
11101                         vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11102                         break;
11103                 }
11104                 vl_arb_set_cache(vlc, t);
11105                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11106                 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11107                                      VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11108                 break;
11109         case FM_TBL_BUFFER_CONTROL:
11110                 ret = set_buffer_control(ppd, t);
11111                 break;
11112         case FM_TBL_SC2VLNT:
11113                 set_sc2vlnt(ppd->dd, t);
11114                 break;
11115         default:
11116                 ret = -EINVAL;
11117         }
11118         return ret;
11119 }
11120
11121 /*
11122  * Disable all data VLs.
11123  *
11124  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11125  */
11126 static int disable_data_vls(struct hfi1_devdata *dd)
11127 {
11128         if (is_ax(dd))
11129                 return 1;
11130
11131         pio_send_control(dd, PSC_DATA_VL_DISABLE);
11132
11133         return 0;
11134 }
11135
11136 /*
11137  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11138  * Just re-enables all data VLs (the "fill" part happens
11139  * automatically - the name was chosen for symmetry with
11140  * stop_drain_data_vls()).
11141  *
11142  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11143  */
11144 int open_fill_data_vls(struct hfi1_devdata *dd)
11145 {
11146         if (is_ax(dd))
11147                 return 1;
11148
11149         pio_send_control(dd, PSC_DATA_VL_ENABLE);
11150
11151         return 0;
11152 }
11153
11154 /*
11155  * drain_data_vls() - assumes that disable_data_vls() has been called;
11156  * waits for the occupancy of the per-VL FIFOs (for all contexts) and
11157  * the SDMA engines to drop to 0.
11158  */
11159 static void drain_data_vls(struct hfi1_devdata *dd)
11160 {
11161         sc_wait(dd);
11162         sdma_wait(dd);
11163         pause_for_credit_return(dd);
11164 }
11165
11166 /*
11167  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11168  *
11169  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11170  * meant to be used like this:
11171  *
11172  * stop_drain_data_vls(dd);
11173  * // do things with per-VL resources
11174  * open_fill_data_vls(dd);
11175  */
11176 int stop_drain_data_vls(struct hfi1_devdata *dd)
11177 {
11178         int ret;
11179
11180         ret = disable_data_vls(dd);
11181         if (ret == 0)
11182                 drain_data_vls(dd);
11183
11184         return ret;
11185 }
11186
11187 /*
11188  * Convert a nanosecond time to a cclock count.  No matter how slow
11189  * the cclock, a non-zero ns will always have a non-zero result.
11190  */
11191 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11192 {
11193         u32 cclocks;
11194
11195         if (dd->icode == ICODE_FPGA_EMULATION)
11196                 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11197         else  /* simulation pretends to be ASIC */
11198                 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11199         if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11200                 cclocks = 1;
11201         return cclocks;
11202 }
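
/*
 * Worked example (assuming ASIC_CCLOCK_PS == 805, i.e. a ~805 ps cclock
 * period): ns_to_cclock(dd, 1000) = (1000 * 1000) / 805 = 1242 cclocks,
 * truncated toward zero.
 */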
11203
11204 /*
11205  * Convert a cclock count to nanoseconds. No matter how slow
11206  * the cclock, a non-zero cclock count will always have a non-zero result.
11207  */
11208 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11209 {
11210         u32 ns;
11211
11212         if (dd->icode == ICODE_FPGA_EMULATION)
11213                 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11214         else  /* simulation pretends to be ASIC */
11215                 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11216         if (cclocks && !ns)
11217                 ns = 1;
11218         return ns;
11219 }
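
/*
 * Note: both conversions truncate, so ns_to_cclock() and cclock_to_ns()
 * are not exact inverses; a round trip may differ from the input by up
 * to about one cclock period.
 */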
11220
11221 /*
11222  * Dynamically adjust the receive interrupt timeout for a context based on
11223  * incoming packet rate.
11224  *
11225  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11226  */
11227 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11228 {
11229         struct hfi1_devdata *dd = rcd->dd;
11230         u32 timeout = rcd->rcvavail_timeout;
11231
11232         /*
11233          * This algorithm doubles or halves the timeout depending on whether
11234          * the number of packets received in this interrupt was less than or
11235          * greater than or equal to the interrupt count.
11236          *
11237          * The calculations below do not allow a steady state to be achieved.
11238          * Only at the endpoints is it possible to have an unchanging
11239          * timeout.
11240          */
11241         if (npkts < rcv_intr_count) {
11242                 /*
11243                  * Not enough packets arrived before the timeout; adjust
11244                  * the timeout downward.
11245                  */
11246                 if (timeout < 2) /* already at minimum? */
11247                         return;
11248                 timeout >>= 1;
11249         } else {
11250                 /*
11251                  * More than enough packets arrived before the timeout; adjust
11252                  * the timeout upward.
11253                  */
11254                 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11255                         return;
11256                 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11257         }
11258
11259         rcd->rcvavail_timeout = timeout;
11260         /*
11261          * timeout cannot be larger than rcv_intr_timeout_csr, which has
11262          * already been verified to be in range
11263          */
11264         write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11265                         (u64)timeout <<
11266                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11267 }
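
/*
 * Example: with rcv_intr_count == 16 and a current timeout of 840, an
 * interrupt covering 5 packets halves the timeout to 420, while one
 * covering 20 packets doubles it (capped at rcv_intr_timeout_csr).
 */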
11268
11269 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11270                     u32 intr_adjust, u32 npkts)
11271 {
11272         struct hfi1_devdata *dd = rcd->dd;
11273         u64 reg;
11274         u32 ctxt = rcd->ctxt;
11275
11276         /*
11277          * Need to write timeout register before updating RcvHdrHead to ensure
11278          * that a new value is used when the HW decides to restart counting.
11279          */
11280         if (intr_adjust)
11281                 adjust_rcv_timeout(rcd, npkts);
11282         if (updegr) {
11283                 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11284                         << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11285                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11286         }
11287         mmiowb();
11288         reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11289                 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11290                         << RCV_HDR_HEAD_HEAD_SHIFT);
11291         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11292         mmiowb();
11293 }
11294
11295 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11296 {
11297         u32 head, tail;
11298
11299         head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11300                 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11301
11302         if (rcd->rcvhdrtail_kvaddr)
11303                 tail = get_rcvhdrtail(rcd);
11304         else
11305                 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11306
11307         return head == tail;
11308 }
11309
11310 /*
11311  * Context Control and Receive Array encoding for buffer size:
11312  *      0x0 invalid
11313  *      0x1   4 KB
11314  *      0x2   8 KB
11315  *      0x3  16 KB
11316  *      0x4  32 KB
11317  *      0x5  64 KB
11318  *      0x6 128 KB
11319  *      0x7 256 KB
11320  *      0x8 512 KB (Receive Array only)
11321  *      0x9   1 MB (Receive Array only)
11322  *      0xa   2 MB (Receive Array only)
11323  *
11324  *      0xB-0xF - reserved (Receive Array only)
11325  *
11326  *
11327  * This routine assumes that the value has already been sanity checked.
11328  */
11329 static u32 encoded_size(u32 size)
11330 {
11331         switch (size) {
11332         case   4 * 1024: return 0x1;
11333         case   8 * 1024: return 0x2;
11334         case  16 * 1024: return 0x3;
11335         case  32 * 1024: return 0x4;
11336         case  64 * 1024: return 0x5;
11337         case 128 * 1024: return 0x6;
11338         case 256 * 1024: return 0x7;
11339         case 512 * 1024: return 0x8;
11340         case   1 * 1024 * 1024: return 0x9;
11341         case   2 * 1024 * 1024: return 0xa;
11342         }
11343         return 0x1;     /* if invalid, go with the minimum size */
11344 }
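
/* e.g., encoded_size(64 * 1024) == 0x5; unlisted sizes fall back to 0x1 (4 KB) */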
11345
11346 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt)
11347 {
11348         struct hfi1_ctxtdata *rcd;
11349         u64 rcvctrl, reg;
11350         int did_enable = 0;
11351
11352         rcd = dd->rcd[ctxt];
11353         if (!rcd)
11354                 return;
11355
11356         hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11357
11358         rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11359         /* if the context is already enabled, don't do the extra steps */
11360         if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11361             !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11362                 /* reset the tail and hdr addresses, and sequence count */
11363                 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11364                                 rcd->rcvhdrq_phys);
11365                 if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
11366                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11367                                         rcd->rcvhdrqtailaddr_phys);
11368                 rcd->seq_cnt = 1;
11369
11370                 /* reset the cached receive header queue head value */
11371                 rcd->head = 0;
11372
11373                 /*
11374                  * Zero the receive header queue so we don't get false
11375                  * positives when checking the sequence number.  The
11376                  * sequence numbers could land exactly on the same spot.
11377                  * E.g., an rcd restart before the receive header queue wrapped.
11378                  */
11379                 memset(rcd->rcvhdrq, 0, rcd->rcvhdrq_size);
11380
11381                 /* starting timeout */
11382                 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11383
11384                 /* enable the context */
11385                 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11386
11387                 /* clean the egr buffer size first */
11388                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11389                 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11390                                 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11391                                         << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11392
11393                 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11394                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11395                 did_enable = 1;
11396
11397                 /* zero RcvEgrIndexHead */
11398                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11399
11400                 /* set eager count and base index */
11401                 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11402                         & RCV_EGR_CTRL_EGR_CNT_MASK)
11403                        << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11404                         (((rcd->eager_base >> RCV_SHIFT)
11405                           & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11406                          << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11407                 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11408
11409                 /*
11410                  * Set TID (expected) count and base index.
11411                  * rcd->expected_count counts individual RcvArray entries,
11412                  * not pairs, and the CSR takes a pair-count in groups of
11413                  * four, so divide by 8.
11414                  */
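                /* e.g., with RCV_SHIFT == 3: 2048 entries >> 3 = 256 pair-groups */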
11415                 reg = (((rcd->expected_count >> RCV_SHIFT)
11416                                         & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11417                                 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11418                       (((rcd->expected_base >> RCV_SHIFT)
11419                                         & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11420                                 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11421                 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11422                 if (ctxt == HFI1_CTRL_CTXT)
11423                         write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11424         }
11425         if (op & HFI1_RCVCTRL_CTXT_DIS) {
11426                 write_csr(dd, RCV_VL15, 0);
11427                 /*
11428                  * When a receive context is being disabled, turn on the tail
11429                  * update with a dummy tail address, then disable the
11430                  * receive context.
11431                  */
11432                 if (dd->rcvhdrtail_dummy_physaddr) {
11433                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11434                                         dd->rcvhdrtail_dummy_physaddr);
11435                         /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11436                         rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11437                 }
11438
11439                 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11440         }
11441         if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
11442                 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11443         if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
11444                 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11445         if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
11446                 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11447         if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11448                 /* See comment on RcvCtxtCtrl.TailUpd above */
11449                 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11450                         rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11451         }
11452         if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11453                 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11454         if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11455                 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11456         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11457                 /*
11458                  * In one-packet-per-eager mode, the size comes from
11459                  * the RcvArray entry.
11460                  */
11461                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11462                 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11463         }
11464         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11465                 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11466         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11467                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11468         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11469                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11470         if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11471                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11472         if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
11473                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
11474         rcd->rcvctrl = rcvctrl;
11475         hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
11476         write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcd->rcvctrl);
11477
11478         /* work around sticky RcvCtxtStatus.BlockedRHQFull */
11479         if (did_enable &&
11480             (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
11481                 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11482                 if (reg != 0) {
11483                         dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
11484                                     ctxt, reg);
11485                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11486                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
11487                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
11488                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
11489                         reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
11490                         dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
11491                                     ctxt, reg, reg == 0 ? "not" : "still");
11492                 }
11493         }
11494
11495         if (did_enable) {
11496                 /*
11497                  * The interrupt timeout and count must be set after
11498                  * the context is enabled to take effect.
11499                  */
11500                 /* set interrupt timeout */
11501                 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
11502                                 (u64)rcd->rcvavail_timeout <<
11503                                 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11504
11505                 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
11506                 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
11507                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11508         }
11509
11510         if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
11511                 /*
11512                  * If the context has been disabled and the Tail Update has
11513                  * been cleared, set the RCV_HDR_TAIL_ADDR CSR to the dummy
11514                  * address so it doesn't contain an invalid address.
11515                  */
11516                 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11517                                 dd->rcvhdrtail_dummy_physaddr);
11518 }
11519
11520 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
11521 {
11522         int ret;
11523         u64 val = 0;
11524
11525         if (namep) {
11526                 ret = dd->cntrnameslen;
11527                 *namep = dd->cntrnames;
11528         } else {
11529                 const struct cntr_entry *entry;
11530                 int i, j;
11531
11532                 ret = (dd->ndevcntrs) * sizeof(u64);
11533
11534                 /* Get the start of the block of counters */
11535                 *cntrp = dd->cntrs;
11536
11537                 /*
11538                  * Now go and fill in each counter in the block.
11539                  */
11540                 for (i = 0; i < DEV_CNTR_LAST; i++) {
11541                         entry = &dev_cntrs[i];
11542                         hfi1_cdbg(CNTR, "reading %s", entry->name);
11543                         if (entry->flags & CNTR_DISABLED) {
11544                                 /* Nothing */
11545                                 hfi1_cdbg(CNTR, "\tDisabled\n");
11546                         } else {
11547                                 if (entry->flags & CNTR_VL) {
11548                                         hfi1_cdbg(CNTR, "\tPer VL\n");
11549                                         for (j = 0; j < C_VL_COUNT; j++) {
11550                                                 val = entry->rw_cntr(entry,
11551                                                                   dd, j,
11552                                                                   CNTR_MODE_R,
11553                                                                   0);
11554                                                 hfi1_cdbg(
11555                                                    CNTR,
11556                                                    "\t\tRead 0x%llx for %d\n",
11557                                                    val, j);
11558                                                 dd->cntrs[entry->offset + j] =
11559                                                                             val;
11560                                         }
11561                                 } else if (entry->flags & CNTR_SDMA) {
11562                                         hfi1_cdbg(CNTR,
11563                                                   "\t Per SDMA Engine\n");
11564                                         for (j = 0; j < dd->chip_sdma_engines;
11565                                              j++) {
11566                                                 val =
11567                                                 entry->rw_cntr(entry, dd, j,
11568                                                                CNTR_MODE_R, 0);
11569                                                 hfi1_cdbg(CNTR,
11570                                                           "\t\tRead 0x%llx for %d\n",
11571                                                           val, j);
11572                                                 dd->cntrs[entry->offset + j] =
11573                                                                         val;
11574                                         }
11575                                 } else {
11576                                         val = entry->rw_cntr(entry, dd,
11577                                                         CNTR_INVALID_VL,
11578                                                         CNTR_MODE_R, 0);
11579                                         dd->cntrs[entry->offset] = val;
11580                                         hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11581                                 }
11582                         }
11583                 }
11584         }
11585         return ret;
11586 }
11587
11588 /*
11589  * Used by sysfs to create the files from which hfi1 port stats are read
11590  */
11591 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
11592 {
11593         int ret;
11594         u64 val = 0;
11595
11596         if (namep) {
11597                 ret = ppd->dd->portcntrnameslen;
11598                 *namep = ppd->dd->portcntrnames;
11599         } else {
11600                 const struct cntr_entry *entry;
11601                 int i, j;
11602
11603                 ret = ppd->dd->nportcntrs * sizeof(u64);
11604                 *cntrp = ppd->cntrs;
11605
11606                 for (i = 0; i < PORT_CNTR_LAST; i++) {
11607                         entry = &port_cntrs[i];
11608                         hfi1_cdbg(CNTR, "reading %s", entry->name);
11609                         if (entry->flags & CNTR_DISABLED) {
11610                                 /* Nothing */
11611                                 hfi1_cdbg(CNTR, "\tDisabled\n");
11612                                 continue;
11613                         }
11614
11615                         if (entry->flags & CNTR_VL) {
11616                                 hfi1_cdbg(CNTR, "\tPer VL");
11617                                 for (j = 0; j < C_VL_COUNT; j++) {
11618                                         val = entry->rw_cntr(entry, ppd, j,
11619                                                                CNTR_MODE_R,
11620                                                                0);
11621                                         hfi1_cdbg(
11622                                            CNTR,
11623                                            "\t\tRead 0x%llx for %d",
11624                                            val, j);
11625                                         ppd->cntrs[entry->offset + j] = val;
11626                                 }
11627                         } else {
11628                                 val = entry->rw_cntr(entry, ppd,
11629                                                        CNTR_INVALID_VL,
11630                                                        CNTR_MODE_R,
11631                                                        0);
11632                                 ppd->cntrs[entry->offset] = val;
11633                                 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
11634                         }
11635                 }
11636         }
11637         return ret;
11638 }
11639
11640 static void free_cntrs(struct hfi1_devdata *dd)
11641 {
11642         struct hfi1_pportdata *ppd;
11643         int i;
11644
11645         if (dd->synth_stats_timer.data)
11646                 del_timer_sync(&dd->synth_stats_timer);
11647         dd->synth_stats_timer.data = 0;
11648         ppd = (struct hfi1_pportdata *)(dd + 1);
11649         for (i = 0; i < dd->num_pports; i++, ppd++) {
11650                 kfree(ppd->cntrs);
11651                 kfree(ppd->scntrs);
11652                 free_percpu(ppd->ibport_data.rvp.rc_acks);
11653                 free_percpu(ppd->ibport_data.rvp.rc_qacks);
11654                 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
11655                 ppd->cntrs = NULL;
11656                 ppd->scntrs = NULL;
11657                 ppd->ibport_data.rvp.rc_acks = NULL;
11658                 ppd->ibport_data.rvp.rc_qacks = NULL;
11659                 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
11660         }
11661         kfree(dd->portcntrnames);
11662         dd->portcntrnames = NULL;
11663         kfree(dd->cntrs);
11664         dd->cntrs = NULL;
11665         kfree(dd->scntrs);
11666         dd->scntrs = NULL;
11667         kfree(dd->cntrnames);
11668         dd->cntrnames = NULL;
11669 }
11670
11671 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
11672 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
11673
11674 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
11675                               u64 *psval, void *context, int vl)
11676 {
11677         u64 val;
11678         u64 sval = *psval;
11679
11680         if (entry->flags & CNTR_DISABLED) {
11681                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11682                 return 0;
11683         }
11684
11685         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11686
11687         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
11688
11689         /* If it's a synthetic counter there is more work we need to do */
11690         if (entry->flags & CNTR_SYNTH) {
11691                 if (sval == CNTR_MAX) {
11692                         /* No need to read already saturated */
11693                         return CNTR_MAX;
11694                 }
11695
11696                 if (entry->flags & CNTR_32BIT) {
11697                         /* 32bit counters can wrap multiple times */
11698                         u64 upper = sval >> 32;
11699                         u64 lower = (sval << 32) >> 32;
11700
11701                         if (lower > val) { /* hw wrapped */
11702                                 if (upper == CNTR_32BIT_MAX)
11703                                         val = CNTR_MAX;
11704                                 else
11705                                         upper++;
11706                         }
11707
11708                         if (val != CNTR_MAX)
11709                                 val = (upper << 32) | val;
11710
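			/*
			 * Worked example (hypothetical values): if the saved
			 * synthetic value is sval = 0x1FFFFFFF0 (upper = 0x1,
			 * lower = 0xFFFFFFF0) and the hardware now reads
			 * val = 0x10, then lower > val, so the 32-bit counter
			 * wrapped once more: upper becomes 0x2 and the
			 * reassembled value is (0x2 << 32) | 0x10 = 0x200000010.
			 */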
11711                 } else {
11712                         /* If we rolled we are saturated */
11713                         if ((val < sval) || (val > CNTR_MAX))
11714                                 val = CNTR_MAX;
11715                 }
11716         }
11717
11718         *psval = val;
11719
11720         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11721
11722         return val;
11723 }
11724
11725 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
11726                                struct cntr_entry *entry,
11727                                u64 *psval, void *context, int vl, u64 data)
11728 {
11729         u64 val;
11730
11731         if (entry->flags & CNTR_DISABLED) {
11732                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
11733                 return 0;
11734         }
11735
11736         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
11737
11738         if (entry->flags & CNTR_SYNTH) {
11739                 *psval = data;
11740                 if (entry->flags & CNTR_32BIT) {
11741                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11742                                              (data << 32) >> 32);
11743                         val = data; /* return the full 64bit value */
11744                 } else {
11745                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
11746                                              data);
11747                 }
11748         } else {
11749                 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
11750         }
11751
11752         *psval = val;
11753
11754         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
11755
11756         return val;
11757 }
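/*
 * Worked example (hypothetical value): writing data = 0x200000010 to a
 * 32-bit synthetic counter above stores the full 64-bit value in the
 * shadow (*psval) while the hardware register receives only the low
 * 32 bits, (data << 32) >> 32 = 0x10.
 */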
11758
11759 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
11760 {
11761         struct cntr_entry *entry;
11762         u64 *sval;
11763
11764         entry = &dev_cntrs[index];
11765         sval = dd->scntrs + entry->offset;
11766
11767         if (vl != CNTR_INVALID_VL)
11768                 sval += vl;
11769
11770         return read_dev_port_cntr(dd, entry, sval, dd, vl);
11771 }
11772
11773 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
11774 {
11775         struct cntr_entry *entry;
11776         u64 *sval;
11777
11778         entry = &dev_cntrs[index];
11779         sval = dd->scntrs + entry->offset;
11780
11781         if (vl != CNTR_INVALID_VL)
11782                 sval += vl;
11783
11784         return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
11785 }
11786
11787 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
11788 {
11789         struct cntr_entry *entry;
11790         u64 *sval;
11791
11792         entry = &port_cntrs[index];
11793         sval = ppd->scntrs + entry->offset;
11794
11795         if (vl != CNTR_INVALID_VL)
11796                 sval += vl;
11797
11798         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11799             (index <= C_RCV_HDR_OVF_LAST)) {
11800                 /* We do not want to bother for disabled contexts */
11801                 return 0;
11802         }
11803
11804         return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
11805 }
11806
11807 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
11808 {
11809         struct cntr_entry *entry;
11810         u64 *sval;
11811
11812         entry = &port_cntrs[index];
11813         sval = ppd->scntrs + entry->offset;
11814
11815         if (vl != CNTR_INVALID_VL)
11816                 sval += vl;
11817
11818         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
11819             (index <= C_RCV_HDR_OVF_LAST)) {
11820                 /* We do not want to bother for disabled contexts */
11821                 return 0;
11822         }
11823
11824         return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
11825 }
11826
11827 static void update_synth_timer(unsigned long opaque)
11828 {
11829         u64 cur_tx;
11830         u64 cur_rx;
11831         u64 total_flits;
11832         u8 update = 0;
11833         int i, j, vl;
11834         struct hfi1_pportdata *ppd;
11835         struct cntr_entry *entry;
11836
11837         struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
11838
11839         /*
11840          * Rather than keep beating on the CSRs, pick a minimal set that we
11841          * can check to watch for a potential rollover. We do this by looking
11842          * at the number of flits sent/received. If the total flit count
11843          * exceeds 32 bits, we have to iterate over all the counters and update.
11844          */
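	/*
	 * Worked example (hypothetical values): if last_tx = 0xFFFFF000 and
	 * the CSR now reads cur_tx = 0x100, then cur_tx < last_tx, so the
	 * tripwire counter itself rolled and a full update is forced.
	 * Otherwise, once the combined tx + rx delta since the last update
	 * reaches CNTR_32BIT_MAX, any 32-bit counter may have wrapped, so
	 * all counters are re-read.
	 */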
11845         entry = &dev_cntrs[C_DC_RCV_FLITS];
11846         cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11847
11848         entry = &dev_cntrs[C_DC_XMIT_FLITS];
11849         cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
11850
11851         hfi1_cdbg(
11852             CNTR,
11853             "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
11854             dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
11855
11856         if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
11857                 /*
11858                  * It may not be strictly necessary to update, but it won't
11859                  * hurt and it simplifies the logic here.
11860                  */
11861                 update = 1;
11862                 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
11863                           dd->unit);
11864         } else {
11865                 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
11866                 hfi1_cdbg(CNTR,
11867                           "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
11868                           total_flits, (u64)CNTR_32BIT_MAX);
11869                 if (total_flits >= CNTR_32BIT_MAX) {
11870                         hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
11871                                   dd->unit);
11872                         update = 1;
11873                 }
11874         }
11875
11876         if (update) {
11877                 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
11878                 for (i = 0; i < DEV_CNTR_LAST; i++) {
11879                         entry = &dev_cntrs[i];
11880                         if (entry->flags & CNTR_VL) {
11881                                 for (vl = 0; vl < C_VL_COUNT; vl++)
11882                                         read_dev_cntr(dd, i, vl);
11883                         } else {
11884                                 read_dev_cntr(dd, i, CNTR_INVALID_VL);
11885                         }
11886                 }
11887                 ppd = (struct hfi1_pportdata *)(dd + 1);
11888                 for (i = 0; i < dd->num_pports; i++, ppd++) {
11889                         for (j = 0; j < PORT_CNTR_LAST; j++) {
11890                                 entry = &port_cntrs[j];
11891                                 if (entry->flags & CNTR_VL) {
11892                                         for (vl = 0; vl < C_VL_COUNT; vl++)
11893                                                 read_port_cntr(ppd, j, vl);
11894                                 } else {
11895                                         read_port_cntr(ppd, j, CNTR_INVALID_VL);
11896                                 }
11897                         }
11898                 }
11899
11900                 /*
11901                  * We want the value in the register. The goal is to keep
11902                  * track of the number of "ticks", not the counter value. In
11903                  * other words, if the register rolls, we want to notice it
11904                  * and force an update.
11905                  */
11906                 entry = &dev_cntrs[C_DC_XMIT_FLITS];
11907                 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11908                                                 CNTR_MODE_R, 0);
11909
11910                 entry = &dev_cntrs[C_DC_RCV_FLITS];
11911                 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
11912                                                 CNTR_MODE_R, 0);
11913
11914                 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
11915                           dd->unit, dd->last_tx, dd->last_rx);
11916
11917         } else {
11918                 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
11919         }
11920
11921         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
11922 }
11923
11924 #define C_MAX_NAME 13 /* 12 chars + one for \0 */
11925 static int init_cntrs(struct hfi1_devdata *dd)
11926 {
11927         int i, rcv_ctxts, j;
11928         size_t sz;
11929         char *p;
11930         char name[C_MAX_NAME];
11931         struct hfi1_pportdata *ppd;
11932         const char *bit_type_32 = ",32";
11933         const int bit_type_32_sz = strlen(bit_type_32);
11934
11935         /* set up the stats timer; the add_timer is done at the end */
11936         setup_timer(&dd->synth_stats_timer, update_synth_timer,
11937                     (unsigned long)dd);
11938
11939         /***********************/
11940         /* per device counters */
11941         /***********************/
11942
11943         /* size names and determine how many we have */
11944         dd->ndevcntrs = 0;
11945         sz = 0;
11946
11947         for (i = 0; i < DEV_CNTR_LAST; i++) {
11948                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
11949                         hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
11950                         continue;
11951                 }
11952
11953                 if (dev_cntrs[i].flags & CNTR_VL) {
11954                         dev_cntrs[i].offset = dd->ndevcntrs;
11955                         for (j = 0; j < C_VL_COUNT; j++) {
11956                                 snprintf(name, C_MAX_NAME, "%s%d",
11957                                          dev_cntrs[i].name, vl_from_idx(j));
11958                                 sz += strlen(name);
11959                                 /* Add ",32" for 32-bit counters */
11960                                 if (dev_cntrs[i].flags & CNTR_32BIT)
11961                                         sz += bit_type_32_sz;
11962                                 sz++;
11963                                 dd->ndevcntrs++;
11964                         }
11965                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
11966                         dev_cntrs[i].offset = dd->ndevcntrs;
11967                         for (j = 0; j < dd->chip_sdma_engines; j++) {
11968                                 snprintf(name, C_MAX_NAME, "%s%d",
11969                                          dev_cntrs[i].name, j);
11970                                 sz += strlen(name);
11971                                 /* Add ",32" for 32-bit counters */
11972                                 if (dev_cntrs[i].flags & CNTR_32BIT)
11973                                         sz += bit_type_32_sz;
11974                                 sz++;
11975                                 dd->ndevcntrs++;
11976                         }
11977                 } else {
11978                         /* +1 for newline. */
11979                         sz += strlen(dev_cntrs[i].name) + 1;
11980                         /* Add ",32" for 32-bit counters */
11981                         if (dev_cntrs[i].flags & CNTR_32BIT)
11982                                 sz += bit_type_32_sz;
11983                         dev_cntrs[i].offset = dd->ndevcntrs;
11984                         dd->ndevcntrs++;
11985                 }
11986         }
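	/*
	 * Illustrative sketch (hypothetical counter names): the name table
	 * built further below is one newline-separated string, with per-VL
	 * and per-SDMA counters expanded by index and 32-bit counters tagged
	 * with the ",32" suffix, e.g.:
	 *
	 *	"TxFlitsVL0\nTxFlitsVL1\n...\nSendDmaDesc0,32\n..."
	 *
	 * sz accumulated above is exactly the total length of that string
	 * (name, optional suffix, and trailing '\n' per entry), so the
	 * kmalloc of dd->cntrnames fits it with no slack.
	 */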
11987
11988         /* allocate space for the counter values */
11989         dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11990         if (!dd->cntrs)
11991                 goto bail;
11992
11993         dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
11994         if (!dd->scntrs)
11995                 goto bail;
11996
11997         /* allocate space for the counter names */
11998         dd->cntrnameslen = sz;
11999         dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12000         if (!dd->cntrnames)
12001                 goto bail;
12002
12003         /* fill in the names */
12004         for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12005                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12006                         /* Nothing */
12007                 } else if (dev_cntrs[i].flags & CNTR_VL) {
12008                         for (j = 0; j < C_VL_COUNT; j++) {
12009                                 snprintf(name, C_MAX_NAME, "%s%d",
12010                                          dev_cntrs[i].name,
12011                                          vl_from_idx(j));
12012                                 memcpy(p, name, strlen(name));
12013                                 p += strlen(name);
12014
12015                                 /* Counter is 32 bits */
12016                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12017                                         memcpy(p, bit_type_32, bit_type_32_sz);
12018                                         p += bit_type_32_sz;
12019                                 }
12020
12021                                 *p++ = '\n';
12022                         }
12023                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12024                         for (j = 0; j < dd->chip_sdma_engines; j++) {
12025                                 snprintf(name, C_MAX_NAME, "%s%d",
12026                                          dev_cntrs[i].name, j);
12027                                 memcpy(p, name, strlen(name));
12028                                 p += strlen(name);
12029
12030                                 /* Counter is 32 bits */
12031                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12032                                         memcpy(p, bit_type_32, bit_type_32_sz);
12033                                         p += bit_type_32_sz;
12034                                 }
12035
12036                                 *p++ = '\n';
12037                         }
12038                 } else {
12039                         memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12040                         p += strlen(dev_cntrs[i].name);
12041
12042                         /* Counter is 32 bits */
12043                         if (dev_cntrs[i].flags & CNTR_32BIT) {
12044                                 memcpy(p, bit_type_32, bit_type_32_sz);
12045                                 p += bit_type_32_sz;
12046                         }
12047
12048                         *p++ = '\n';
12049                 }
12050         }
12051
12052         /*********************/
12053         /* per port counters */
12054         /*********************/
12055
12056         /*
12057          * Go through the receive header overflow counters and disable the
12058          * ones we don't need. This varies by platform, so we need to do it
12059          * dynamically here.
12060          */
12061         rcv_ctxts = dd->num_rcv_contexts;
12062         for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12063              i <= C_RCV_HDR_OVF_LAST; i++) {
12064                 port_cntrs[i].flags |= CNTR_DISABLED;
12065         }
12066
12067         /* size port counter names and determine how many we have */
12068         sz = 0;
12069         dd->nportcntrs = 0;
12070         for (i = 0; i < PORT_CNTR_LAST; i++) {
12071                 if (port_cntrs[i].flags & CNTR_DISABLED) {
12072                         hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12073                         continue;
12074                 }
12075
12076                 if (port_cntrs[i].flags & CNTR_VL) {
12077                         port_cntrs[i].offset = dd->nportcntrs;
12078                         for (j = 0; j < C_VL_COUNT; j++) {
12079                                 snprintf(name, C_MAX_NAME, "%s%d",
12080                                          port_cntrs[i].name, vl_from_idx(j));
12081                                 sz += strlen(name);
12082                                 /* Add ",32" for 32-bit counters */
12083                                 if (port_cntrs[i].flags & CNTR_32BIT)
12084                                         sz += bit_type_32_sz;
12085                                 sz++;
12086                                 dd->nportcntrs++;
12087                         }
12088                 } else {
12089                         /* +1 for newline */
12090                         sz += strlen(port_cntrs[i].name) + 1;
12091                         /* Add ",32" for 32-bit counters */
12092                         if (port_cntrs[i].flags & CNTR_32BIT)
12093                                 sz += bit_type_32_sz;
12094                         port_cntrs[i].offset = dd->nportcntrs;
12095                         dd->nportcntrs++;
12096                 }
12097         }
12098
12099         /* allocate space for the counter names */
12100         dd->portcntrnameslen = sz;
12101         dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12102         if (!dd->portcntrnames)
12103                 goto bail;
12104
12105         /* fill in port cntr names */
12106         for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12107                 if (port_cntrs[i].flags & CNTR_DISABLED)
12108                         continue;
12109
12110                 if (port_cntrs[i].flags & CNTR_VL) {
12111                         for (j = 0; j < C_VL_COUNT; j++) {
12112                                 snprintf(name, C_MAX_NAME, "%s%d",
12113                                          port_cntrs[i].name, vl_from_idx(j));
12114                                 memcpy(p, name, strlen(name));
12115                                 p += strlen(name);
12116
12117                                 /* Counter is 32 bits */
12118                                 if (port_cntrs[i].flags & CNTR_32BIT) {
12119                                         memcpy(p, bit_type_32, bit_type_32_sz);
12120                                         p += bit_type_32_sz;
12121                                 }
12122
12123                                 *p++ = '\n';
12124                         }
12125                 } else {
12126                         memcpy(p, port_cntrs[i].name,
12127                                strlen(port_cntrs[i].name));
12128                         p += strlen(port_cntrs[i].name);
12129
12130                         /* Counter is 32 bits */
12131                         if (port_cntrs[i].flags & CNTR_32BIT) {
12132                                 memcpy(p, bit_type_32, bit_type_32_sz);
12133                                 p += bit_type_32_sz;
12134                         }
12135
12136                         *p++ = '\n';
12137                 }
12138         }
12139
12140         /* allocate per port storage for counter values */
12141         ppd = (struct hfi1_pportdata *)(dd + 1);
12142         for (i = 0; i < dd->num_pports; i++, ppd++) {
12143                 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12144                 if (!ppd->cntrs)
12145                         goto bail;
12146
12147                 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12148                 if (!ppd->scntrs)
12149                         goto bail;
12150         }
12151
12152         /* CPU counters need to be allocated and zeroed */
12153         if (init_cpu_counters(dd))
12154                 goto bail;
12155
12156         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12157         return 0;
12158 bail:
12159         free_cntrs(dd);
12160         return -ENOMEM;
12161 }
12162
12163 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12164 {
12165         switch (chip_lstate) {
12166         default:
12167                 dd_dev_err(dd,
12168                            "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12169                            chip_lstate);
12170                 /* fall through */
12171         case LSTATE_DOWN:
12172                 return IB_PORT_DOWN;
12173         case LSTATE_INIT:
12174                 return IB_PORT_INIT;
12175         case LSTATE_ARMED:
12176                 return IB_PORT_ARMED;
12177         case LSTATE_ACTIVE:
12178                 return IB_PORT_ACTIVE;
12179         }
12180 }
12181
12182 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12183 {
12184         /* look at the HFI meta-states only */
12185         switch (chip_pstate & 0xf0) {
12186         default:
12187                 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12188                            chip_pstate);
12189                 /* fall through */
12190         case PLS_DISABLED:
12191                 return IB_PORTPHYSSTATE_DISABLED;
12192         case PLS_OFFLINE:
12193                 return OPA_PORTPHYSSTATE_OFFLINE;
12194         case PLS_POLLING:
12195                 return IB_PORTPHYSSTATE_POLLING;
12196         case PLS_CONFIGPHY:
12197                 return IB_PORTPHYSSTATE_TRAINING;
12198         case PLS_LINKUP:
12199                 return IB_PORTPHYSSTATE_LINKUP;
12200         case PLS_PHYTEST:
12201                 return IB_PORTPHYSSTATE_PHY_TEST;
12202         }
12203 }
12204
12205 /* return the OPA port logical state name */
12206 const char *opa_lstate_name(u32 lstate)
12207 {
12208         static const char * const port_logical_names[] = {
12209                 "PORT_NOP",
12210                 "PORT_DOWN",
12211                 "PORT_INIT",
12212                 "PORT_ARMED",
12213                 "PORT_ACTIVE",
12214                 "PORT_ACTIVE_DEFER",
12215         };
12216         if (lstate < ARRAY_SIZE(port_logical_names))
12217                 return port_logical_names[lstate];
12218         return "unknown";
12219 }
12220
12221 /* return the OPA port physical state name */
12222 const char *opa_pstate_name(u32 pstate)
12223 {
12224         static const char * const port_physical_names[] = {
12225                 "PHYS_NOP",
12226                 "reserved1",
12227                 "PHYS_POLL",
12228                 "PHYS_DISABLED",
12229                 "PHYS_TRAINING",
12230                 "PHYS_LINKUP",
12231                 "PHYS_LINK_ERR_RECOVER",
12232                 "PHYS_PHY_TEST",
12233                 "reserved8",
12234                 "PHYS_OFFLINE",
12235                 "PHYS_GANGED",
12236                 "PHYS_TEST",
12237         };
12238         if (pstate < ARRAY_SIZE(port_physical_names))
12239                 return port_physical_names[pstate];
12240         return "unknown";
12241 }
12242
12243 /*
12244  * Read the hardware link state and set the driver's cached value of it.
12245  * Return the (new) current value.
12246  */
12247 u32 get_logical_state(struct hfi1_pportdata *ppd)
12248 {
12249         u32 new_state;
12250
12251         new_state = chip_to_opa_lstate(ppd->dd, read_logical_state(ppd->dd));
12252         if (new_state != ppd->lstate) {
12253                 dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12254                             opa_lstate_name(new_state), new_state);
12255                 ppd->lstate = new_state;
12256         }
12257         /*
12258          * Set port status flags in the page mapped into userspace
12259          * memory. Do it here to ensure a reliable state - this is
12260          * the only function called by all state handling code.
12261          * Always set the flags due to the fact that the cache value
12262          * might have been changed explicitly outside of this
12263          * function.
12264          */
12265         if (ppd->statusp) {
12266                 switch (ppd->lstate) {
12267                 case IB_PORT_DOWN:
12268                 case IB_PORT_INIT:
12269                         *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12270                                            HFI1_STATUS_IB_READY);
12271                         break;
12272                 case IB_PORT_ARMED:
12273                         *ppd->statusp |= HFI1_STATUS_IB_CONF;
12274                         break;
12275                 case IB_PORT_ACTIVE:
12276                         *ppd->statusp |= HFI1_STATUS_IB_READY;
12277                         break;
12278                 }
12279         }
12280         return ppd->lstate;
12281 }
12282
12283 /**
12284  * wait_logical_linkstate - wait for an IB link state change to occur
12285  * @ppd: port device
12286  * @state: the state to wait for
12287  * @msecs: the number of milliseconds to wait
12288  *
12289  * Wait up to msecs milliseconds for IB link state change to occur.
12290  * For now, take the easy polling route.
12291  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12292  */
12293 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12294                                   int msecs)
12295 {
12296         unsigned long timeout;
12297
12298         timeout = jiffies + msecs_to_jiffies(msecs);
12299         while (1) {
12300                 if (get_logical_state(ppd) == state)
12301                         return 0;
12302                 if (time_after(jiffies, timeout))
12303                         break;
12304                 msleep(20);
12305         }
12306         dd_dev_err(ppd->dd, "timeout waiting for link state 0x%x\n", state);
12307
12308         return -ETIMEDOUT;
12309 }
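/*
 * Usage sketch (hypothetical call site):
 *
 *	if (wait_logical_linkstate(ppd, IB_PORT_INIT, 1000))
 *		return -ETIMEDOUT;
 *
 * The 20 ms msleep() above bounds the polling rate, so the actual wait
 * can overshoot msecs by up to one sleep period.
 */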
12310
12311 u8 hfi1_ibphys_portstate(struct hfi1_pportdata *ppd)
12312 {
12313         u32 pstate;
12314         u32 ib_pstate;
12315
12316         pstate = read_physical_state(ppd->dd);
12317         ib_pstate = chip_to_opa_pstate(ppd->dd, pstate);
12318         if (ppd->last_pstate != ib_pstate) {
12319                 dd_dev_info(ppd->dd,
12320                             "%s: physical state changed to %s (0x%x), phy 0x%x\n",
12321                             __func__, opa_pstate_name(ib_pstate), ib_pstate,
12322                             pstate);
12323                 ppd->last_pstate = ib_pstate;
12324         }
12325         return ib_pstate;
12326 }
12327
12328 /*
12329  * Read/modify/write ASIC_QSFP register bits as selected by mask
12330  * data: 0 or 1 in the bit positions that need to be written
12331  * dir: per-bit direction; 0 for read (input), 1 for write (output)
12332  * mask: select by setting
12333  *      I2CCLK  (bit 0)
12334  *      I2CDATA (bit 1)
12335  */
12336 u64 hfi1_gpio_mod(struct hfi1_devdata *dd, u32 target, u32 data, u32 dir,
12337                   u32 mask)
12338 {
12339         u64 qsfp_oe, target_oe;
12340
12341         target_oe = target ? ASIC_QSFP2_OE : ASIC_QSFP1_OE;
12342         if (mask) {
12343                 /* We are writing register bits, so lock access */
12344                 dir &= mask;
12345                 data &= mask;
12346
12347                 qsfp_oe = read_csr(dd, target_oe);
12348                 qsfp_oe = (qsfp_oe & ~(u64)mask) | (u64)dir;
12349                 write_csr(dd, target_oe, qsfp_oe);
12350         }
12351         /* We are exclusively reading bits here, but it is unlikely
12352          * we'll get valid data when we set the direction of the pin
12353          * in the same call, so the reader should call this function
12354          * again to get valid data.
12355          */
12356         return read_csr(dd, target ? ASIC_QSFP2_IN : ASIC_QSFP1_IN);
12357 }
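/*
 * Usage sketch (assuming the QSFP_HFI0_I2CCLK bit definition from qsfp.h
 * and open-drain signaling, where enabling the output drives the line
 * low): pull SCL low on the first QSFP target, then sample the pins with
 * a separate pure-read call (mask == 0 skips the OE write):
 *
 *	hfi1_gpio_mod(dd, 0, 0, QSFP_HFI0_I2CCLK, QSFP_HFI0_I2CCLK);
 *	in = hfi1_gpio_mod(dd, 0, 0, 0, 0);
 */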
12358
12359 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12360 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12361
12362 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12363 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12364
12365 int hfi1_init_ctxt(struct send_context *sc)
12366 {
12367         if (sc) {
12368                 struct hfi1_devdata *dd = sc->dd;
12369                 u64 reg;
12370                 u8 set = (sc->type == SC_USER ?
12371                           HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
12372                           HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
12373                 reg = read_kctxt_csr(dd, sc->hw_context,
12374                                      SEND_CTXT_CHECK_ENABLE);
12375                 if (set)
12376                         CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
12377                 else
12378                         SET_STATIC_RATE_CONTROL_SMASK(reg);
12379                 write_kctxt_csr(dd, sc->hw_context,
12380                                 SEND_CTXT_CHECK_ENABLE, reg);
12381         }
12382         return 0;
12383 }
12384
12385 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
12386 {
12387         int ret = 0;
12388         u64 reg;
12389
12390         if (dd->icode != ICODE_RTL_SILICON) {
12391                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
12392                         dd_dev_info(dd, "%s: tempsense not supported by HW\n",
12393                                     __func__);
12394                 return -EINVAL;
12395         }
12396         reg = read_csr(dd, ASIC_STS_THERM);
12397         temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
12398                       ASIC_STS_THERM_CURR_TEMP_MASK);
12399         temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
12400                         ASIC_STS_THERM_LO_TEMP_MASK);
12401         temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
12402                         ASIC_STS_THERM_HI_TEMP_MASK);
12403         temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
12404                           ASIC_STS_THERM_CRIT_TEMP_MASK);
12405         /* triggers is a 3-bit value - 1 bit per trigger. */
12406         temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
12407
12408         return ret;
12409 }
12410
12411 /* ========================================================================= */
12412
12413 /*
12414  * Enable/disable delivery of interrupts from the chip.
12415  */
12416 void set_intr_state(struct hfi1_devdata *dd, u32 enable)
12417 {
12418         int i;
12419
12420         /*
12421          * In HFI, the mask needs to be 1 to allow interrupts.
12422          */
12423         if (enable) {
12424                 /* enable all interrupts */
12425                 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12426                         write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
12427
12428                 init_qsfp_int(dd);
12429         } else {
12430                 for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12431                         write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
12432         }
12433 }
12434
12435 /*
12436  * Clear all interrupt sources on the chip.
12437  */
12438 static void clear_all_interrupts(struct hfi1_devdata *dd)
12439 {
12440         int i;
12441
12442         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12443                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
12444
12445         write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
12446         write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
12447         write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
12448         write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
12449         write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
12450         write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
12451         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
12452         for (i = 0; i < dd->chip_send_contexts; i++)
12453                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
12454         for (i = 0; i < dd->chip_sdma_engines; i++)
12455                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
12456
12457         write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
12458         write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
12459         write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
12460 }
12461
12462 /* Move to pcie.c? */
12463 static void disable_intx(struct pci_dev *pdev)
12464 {
12465         pci_intx(pdev, 0);
12466 }
12467
12468 static void clean_up_interrupts(struct hfi1_devdata *dd)
12469 {
12470         int i;
12471
12472         /* remove irqs - must happen before disabling/turning off */
12473         if (dd->num_msix_entries) {
12474                 /* MSI-X */
12475                 struct hfi1_msix_entry *me = dd->msix_entries;
12476
12477                 for (i = 0; i < dd->num_msix_entries; i++, me++) {
12478                         if (!me->arg) /* => no irq, no affinity */
12479                                 continue;
12480                         hfi1_put_irq_affinity(dd, &dd->msix_entries[i]);
12481                         free_irq(me->msix.vector, me->arg);
12482                 }
12483         } else {
12484                 /* INTx */
12485                 if (dd->requested_intx_irq) {
12486                         free_irq(dd->pcidev->irq, dd);
12487                         dd->requested_intx_irq = 0;
12488                 }
12489         }
12490
12491         /* turn off interrupts */
12492         if (dd->num_msix_entries) {
12493                 /* MSI-X */
12494                 pci_disable_msix(dd->pcidev);
12495         } else {
12496                 /* INTx */
12497                 disable_intx(dd->pcidev);
12498         }
12499
12500         /* clean structures */
12501         kfree(dd->msix_entries);
12502         dd->msix_entries = NULL;
12503         dd->num_msix_entries = 0;
12504 }
12505
12506 /*
12507  * Remap the interrupt source from the general handler to the given MSI-X
12508  * interrupt.
12509  */
12510 static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
12511 {
12512         u64 reg;
12513         int m, n;
12514
12515         /* clear from the handled mask of the general interrupt */
12516         m = isrc / 64;
12517         n = isrc % 64;
12518         dd->gi_mask[m] &= ~((u64)1 << n);
12519
12520         /* direct the chip source to the given MSI-X interrupt */
12521         m = isrc / 8;
12522         n = isrc % 8;
12523         reg = read_csr(dd, CCE_INT_MAP + (8 * m));
12524         reg &= ~((u64)0xff << (8 * n));
12525         reg |= ((u64)msix_intr & 0xff) << (8 * n);
12526         write_csr(dd, CCE_INT_MAP + (8 * m), reg);
12527 }
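/*
 * Worked example (hypothetical source number): for isrc = 137,
 * m = 137 / 64 = 2 and n = 137 % 64 = 9, so bit 9 of dd->gi_mask[2] is
 * cleared; then m = 137 / 8 = 17 and n = 137 % 8 = 1, so byte 1 of the
 * CCE_INT_MAP CSR at index 17 is rewritten with the MSI-X vector number.
 */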
12528
12529 static void remap_sdma_interrupts(struct hfi1_devdata *dd,
12530                                   int engine, int msix_intr)
12531 {
12532         /*
12533          * SDMA engine interrupt sources are grouped by type, rather than
12534          * by engine.  Per-engine interrupts are as follows:
12535          *      SDMA
12536          *      SDMAProgress
12537          *      SDMAIdle
12538          */
12539         remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
12540                    msix_intr);
12541         remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
12542                    msix_intr);
12543         remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
12544                    msix_intr);
12545 }
12546
12547 static int request_intx_irq(struct hfi1_devdata *dd)
12548 {
12549         int ret;
12550
12551         snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
12552                  dd->unit);
12553         ret = request_irq(dd->pcidev->irq, general_interrupt,
12554                           IRQF_SHARED, dd->intx_name, dd);
12555         if (ret)
12556                 dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
12557                            ret);
12558         else
12559                 dd->requested_intx_irq = 1;
12560         return ret;
12561 }
12562
12563 static int request_msix_irqs(struct hfi1_devdata *dd)
12564 {
12565         int first_general, last_general;
12566         int first_sdma, last_sdma;
12567         int first_rx, last_rx;
12568         int i, ret = 0;
12569
12570         /* calculate the ranges we are going to use */
12571         first_general = 0;
12572         last_general = first_general + 1;
12573         first_sdma = last_general;
12574         last_sdma = first_sdma + dd->num_sdma;
12575         first_rx = last_sdma;
12576         last_rx = first_rx + dd->n_krcv_queues;
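	/*
	 * Worked example (hypothetical sizes): with dd->num_sdma = 4 and
	 * dd->n_krcv_queues = 3, the vector layout is:
	 *	0	- general
	 *	1..4	- SDMA engines
	 *	5..7	- kernel receive contexts
	 * matching the total of 1 + num_sdma + n_krcv_queues computed in
	 * set_up_interrupts().
	 */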
12577
12578         /*
12579          * Sanity check - the code expects all SDMA chip source
12580          * interrupts to be in the same CSR, starting at bit 0.  Verify
12581          * that this is true by checking the bit location of the start.
12582          */
12583         BUILD_BUG_ON(IS_SDMA_START % 64);
12584
12585         for (i = 0; i < dd->num_msix_entries; i++) {
12586                 struct hfi1_msix_entry *me = &dd->msix_entries[i];
12587                 const char *err_info;
12588                 irq_handler_t handler;
12589                 irq_handler_t thread = NULL;
12590                 void *arg;
12591                 int idx;
12592                 struct hfi1_ctxtdata *rcd = NULL;
12593                 struct sdma_engine *sde = NULL;
12594
12595                 /* obtain the arguments to request_irq */
12596                 if (first_general <= i && i < last_general) {
12597                         idx = i - first_general;
12598                         handler = general_interrupt;
12599                         arg = dd;
12600                         snprintf(me->name, sizeof(me->name),
12601                                  DRIVER_NAME "_%d", dd->unit);
12602                         err_info = "general";
12603                         me->type = IRQ_GENERAL;
12604                 } else if (first_sdma <= i && i < last_sdma) {
12605                         idx = i - first_sdma;
12606                         sde = &dd->per_sdma[idx];
12607                         handler = sdma_interrupt;
12608                         arg = sde;
12609                         snprintf(me->name, sizeof(me->name),
12610                                  DRIVER_NAME "_%d sdma%d", dd->unit, idx);
12611                         err_info = "sdma";
12612                         remap_sdma_interrupts(dd, idx, i);
12613                         me->type = IRQ_SDMA;
12614                 } else if (first_rx <= i && i < last_rx) {
12615                         idx = i - first_rx;
12616                         rcd = dd->rcd[idx];
12617                         /* no interrupt if no rcd */
12618                         if (!rcd)
12619                                 continue;
12620                         /*
12621                          * Set the interrupt register and mask for this
12622                          * context's interrupt.
12623                          */
12624                         rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
12625                         rcd->imask = ((u64)1) <<
12626                                         ((IS_RCVAVAIL_START + idx) % 64);
12627                         handler = receive_context_interrupt;
12628                         thread = receive_context_thread;
12629                         arg = rcd;
12630                         snprintf(me->name, sizeof(me->name),
12631                                  DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
12632                         err_info = "receive context";
12633                         remap_intr(dd, IS_RCVAVAIL_START + idx, i);
12634                         me->type = IRQ_RCVCTXT;
12635                 } else {
12636                         /* not in our expected range - complain, then
12637                          * ignore it
12638                          */
12639                         dd_dev_err(dd,
12640                                    "Unexpected extra MSI-X interrupt %d\n", i);
12641                         continue;
12642                 }
12643                 /* no argument, no interrupt */
12644                 if (!arg)
12645                         continue;
12646                 /* make sure the name is terminated */
12647                 me->name[sizeof(me->name) - 1] = 0;
12648
12649                 ret = request_threaded_irq(me->msix.vector, handler, thread, 0,
12650                                            me->name, arg);
12651                 if (ret) {
12652                         dd_dev_err(dd,
12653                                    "unable to allocate %s interrupt, vector %d, index %d, err %d\n",
12654                                    err_info, me->msix.vector, idx, ret);
12655                         return ret;
12656                 }
12657                 /*
12658                  * assign arg after request_irq call, so it will be
12659                  * cleaned up
12660                  */
12661                 me->arg = arg;
12662
12663                 ret = hfi1_get_irq_affinity(dd, me);
12664                 if (ret)
12665                         dd_dev_err(dd,
12666                                    "unable to pin IRQ %d\n", ret);
12667         }
12668
12669         return ret;
12670 }
12671
12672 /*
12673  * Set the general handler to accept all interrupts, remap all
12674  * chip interrupts back to MSI-X 0.
12675  */
12676 static void reset_interrupts(struct hfi1_devdata *dd)
12677 {
12678         int i;
12679
12680         /* all interrupts handled by the general handler */
12681         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
12682                 dd->gi_mask[i] = ~(u64)0;
12683
12684         /* all chip interrupts map to MSI-X 0 */
12685         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12686                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12687 }
12688
12689 static int set_up_interrupts(struct hfi1_devdata *dd)
12690 {
12691         struct hfi1_msix_entry *entries;
12692         u32 total, request;
12693         int i, ret;
12694         int single_interrupt = 0; /* we expect to have all the interrupts */
12695
12696         /*
12697          * Interrupt count:
12698          *      1 general, "slow path" interrupt (includes the SDMA engines
12699          *              slow source, SDMACleanupDone)
12700          *      N interrupts - one per used SDMA engine
12701          *      M interrupts - one per kernel receive context
12702          */
12703         total = 1 + dd->num_sdma + dd->n_krcv_queues;
12704
12705         entries = kcalloc(total, sizeof(*entries), GFP_KERNEL);
12706         if (!entries) {
12707                 ret = -ENOMEM;
12708                 goto fail;
12709         }
12710         /* 1-1 MSI-X entry assignment */
12711         for (i = 0; i < total; i++)
12712                 entries[i].msix.entry = i;
12713
12714         /* ask for MSI-X interrupts */
12715         request = total;
12716         request_msix(dd, &request, entries);
12717
12718         if (request == 0) {
12719                 /* using INTx */
12720                 /* dd->num_msix_entries already zero */
12721                 kfree(entries);
12722                 single_interrupt = 1;
12723                 dd_dev_err(dd, "MSI-X failed, using INTx interrupts\n");
12724         } else {
12725                 /* using MSI-X */
12726                 dd->num_msix_entries = request;
12727                 dd->msix_entries = entries;
12728
12729                 if (request != total) {
12730                         /* using MSI-X, with reduced interrupts */
12731                         dd_dev_err(
12732                                 dd,
12733                                 "cannot handle reduced interrupt case, want %u, got %u\n",
12734                                 total, request);
12735                         ret = -EINVAL;
12736                         goto fail;
12737                 }
12738                 dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
12739         }
12740
12741         /* mask all interrupts */
12742         set_intr_state(dd, 0);
12743         /* clear all pending interrupts */
12744         clear_all_interrupts(dd);
12745
12746         /* reset general handler mask, chip MSI-X mappings */
12747         reset_interrupts(dd);
12748
12749         if (single_interrupt)
12750                 ret = request_intx_irq(dd);
12751         else
12752                 ret = request_msix_irqs(dd);
12753         if (ret)
12754                 goto fail;
12755
12756         return 0;
12757
12758 fail:
12759         clean_up_interrupts(dd);
12760         return ret;
12761 }
12762
12763 /*
12764  * Set up context values in dd.  Sets:
12765  *
12766  *      num_rcv_contexts - number of contexts being used
12767  *      n_krcv_queues - number of kernel contexts
12768  *      first_user_ctxt - first non-kernel context in array of contexts
12769  *      freectxts  - number of free user contexts
12770  *      num_send_contexts - number of PIO send contexts being used
12771  */
12772 static int set_up_context_variables(struct hfi1_devdata *dd)
12773 {
12774         int num_kernel_contexts;
12775         int total_contexts;
12776         int ret;
12777         unsigned ngroups;
12778         int qos_rmt_count;
12779         int user_rmt_reduced;
12780
12781         /*
12782          * Kernel receive contexts:
12783          * - the larger of 2 or 1 context/NUMA node (excluding control context)
12784          * - Context 0 - control context (VL15/multicast/error)
12785          * - Context 1 - first kernel context
12786          * - Context 2 - second kernel context
12787          * ...
12788          */
12789         if (n_krcvqs)
12790                 /*
12791                  * n_krcvqs is the sum of module parameter kernel receive
12792                  * contexts, krcvqs[].  It does not include the control
12793                  * context, so add that.
12794                  */
12795                 num_kernel_contexts = n_krcvqs + 1;
12796         else
12797                 num_kernel_contexts = num_online_nodes() + 1;
12798         num_kernel_contexts =
12799                 max_t(int, MIN_KERNEL_KCTXTS, num_kernel_contexts);
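	/*
	 * Worked example (hypothetical system): with no krcvqs module
	 * parameter and two online NUMA nodes, this yields 2 + 1 = 3
	 * contexts (control plus one kernel context per node); the max_t()
	 * above only raises counts that fall below MIN_KERNEL_KCTXTS.
	 */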
12800         /*
12801          * Every kernel receive context needs an ACK send context.
12802          * One send context is allocated for each VL{0-7} and VL15.
12803          */
12804         if (num_kernel_contexts > (dd->chip_send_contexts - num_vls - 1)) {
12805                 dd_dev_err(dd,
12806                            "Reducing # kernel rcv contexts to: %d, from %d\n",
12807                            (int)(dd->chip_send_contexts - num_vls - 1),
12808                            (int)num_kernel_contexts);
12809                 num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
12810         }
12811         /*
12812          * User contexts:
12813          *      - default to 1 user context per real (non-HT) CPU core if
12814          *        num_user_contexts is negative
12815          */
12816         if (num_user_contexts < 0)
12817                 num_user_contexts =
12818                         cpumask_weight(&dd->affinity->real_cpu_mask);
12819
12820         total_contexts = num_kernel_contexts + num_user_contexts;
12821
12822         /*
12823          * Adjust the counts given a global max.
12824          */
12825         if (total_contexts > dd->chip_rcv_contexts) {
12826                 dd_dev_err(dd,
12827                            "Reducing # user receive contexts to: %d, from %d\n",
12828                            (int)(dd->chip_rcv_contexts - num_kernel_contexts),
12829                            (int)num_user_contexts);
12830                 num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
12831                 /* recalculate */
12832                 total_contexts = num_kernel_contexts + num_user_contexts;
12833         }
12834
12835         /* each user context requires an entry in the RMT */
12836         qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
12837         if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
12838                 user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
12839                 dd_dev_err(dd,
12840                            "RMT size is reducing the number of user receive contexts from %d to %d\n",
12841                            (int)num_user_contexts,
12842                            user_rmt_reduced);
12843                 /* recalculate */
12844                 num_user_contexts = user_rmt_reduced;
12845                 total_contexts = num_kernel_contexts + num_user_contexts;
12846         }
12847
12848         /* the first N are kernel contexts, the rest are user contexts */
12849         dd->num_rcv_contexts = total_contexts;
12850         dd->n_krcv_queues = num_kernel_contexts;
12851         dd->first_user_ctxt = num_kernel_contexts;
12852         dd->num_user_contexts = num_user_contexts;
12853         dd->freectxts = num_user_contexts;
12854         dd_dev_info(dd,
12855                     "rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
12856                     (int)dd->chip_rcv_contexts,
12857                     (int)dd->num_rcv_contexts,
12858                     (int)dd->n_krcv_queues,
12859                     (int)dd->num_rcv_contexts - dd->n_krcv_queues);
12860
12861         /*
12862          * Receive array allocation:
12863          *   All RcvArray entries are divided into groups of 8. This
12864          *   is required by the hardware and will speed up writes to
12865          *   consecutive entries by using write-combining of the entire
12866          *   cacheline.
12867          *
12868          *   The groups are evenly divided among all contexts; any
12869          *   leftover groups are given to the first N user
12870          *   contexts.
12871          */
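	/*
	 * Worked example (hypothetical sizes): with a 32768-entry RcvArray
	 * and a group size of 8, ngroups = 4096. Split over 20 receive
	 * contexts that is 204 groups per context, with 4096 - 20 * 204 = 16
	 * extra groups handed to the first user contexts.
	 */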
12872         dd->rcv_entries.group_size = RCV_INCREMENT;
12873         ngroups = dd->chip_rcv_array_count / dd->rcv_entries.group_size;
12874         dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
12875         dd->rcv_entries.nctxt_extra = ngroups -
12876                 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
12877         dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
12878                     dd->rcv_entries.ngroups,
12879                     dd->rcv_entries.nctxt_extra);
12880         if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
12881             MAX_EAGER_ENTRIES * 2) {
12882                 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
12883                         dd->rcv_entries.group_size;
12884                 dd_dev_info(dd,
12885                             "RcvArray group count too high, change to %u\n",
12886                             dd->rcv_entries.ngroups);
12887                 dd->rcv_entries.nctxt_extra = 0;
12888         }
12889         /*
12890          * PIO send contexts
12891          */
12892         ret = init_sc_pools_and_sizes(dd);
12893         if (ret >= 0) { /* success */
12894                 dd->num_send_contexts = ret;
12895                 dd_dev_info(
12896                         dd,
12897                         "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
12898                         dd->chip_send_contexts,
12899                         dd->num_send_contexts,
12900                         dd->sc_sizes[SC_KERNEL].count,
12901                         dd->sc_sizes[SC_ACK].count,
12902                         dd->sc_sizes[SC_USER].count,
12903                         dd->sc_sizes[SC_VL15].count);
12904                 ret = 0;        /* success */
12905         }
12906
12907         return ret;
12908 }
12909
12910 /*
12911  * Set the device/port partition key table. The MAD code
12912  * will ensure that, at least, the partial management
12913  * partition key is present in the table.
12914  */
12915 static void set_partition_keys(struct hfi1_pportdata *ppd)
12916 {
12917         struct hfi1_devdata *dd = ppd->dd;
12918         u64 reg = 0;
12919         int i;
12920
12921         dd_dev_info(dd, "Setting partition keys\n");
12922         for (i = 0; i < hfi1_get_npkeys(dd); i++) {
12923                 reg |= (ppd->pkeys[i] &
12924                         RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
12925                         ((i % 4) *
12926                          RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
12927                 /* Each register holds 4 PKey values. */
12928                 if ((i % 4) == 3) {
12929                         write_csr(dd, RCV_PARTITION_KEY +
12930                                   ((i - 3) * 2), reg);
12931                         reg = 0;
12932                 }
12933         }
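	/*
	 * Worked example: pkeys[0..3] are packed 16 bits each into the
	 * 64-bit CSR at RCV_PARTITION_KEY + 0 (written when i == 3),
	 * pkeys[4..7] into RCV_PARTITION_KEY + 8 ((7 - 3) * 2 = 8), and
	 * so on.
	 */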
12934
12935         /* Always enable HW pkeys check when pkeys table is set */
12936         add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
12937 }
12938
12939 /*
12940  * These CSRs and memories are uninitialized on reset and must be
12941  * written before reading to set the ECC/parity bits.
12942  *
12943  * NOTE: All user context CSRs that are not mmaped write-only
12944  * (e.g. the TID flows) must be initialized even if the driver never
12945  * reads them.
12946  */
12947 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
12948 {
12949         int i, j;
12950
12951         /* CceIntMap */
12952         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
12953                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
12954
12955         /* SendCtxtCreditReturnAddr */
12956         for (i = 0; i < dd->chip_send_contexts; i++)
12957                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
12958
12959         /* PIO Send buffers */
12960         /* SDMA Send buffers */
12961         /*
12962          * These are not normally read, and (presently) have no method
12963          * to be read, so are not pre-initialized
12964          */
12965
12966         /* RcvHdrAddr */
12967         /* RcvHdrTailAddr */
12968         /* RcvTidFlowTable */
12969         for (i = 0; i < dd->chip_rcv_contexts; i++) {
12970                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
12971                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
12972                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
12973                         write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
12974         }
12975
12976         /* RcvArray */
12977         for (i = 0; i < dd->chip_rcv_array_count; i++)
12978                 write_csr(dd, RCV_ARRAY + (8 * i),
12979                           RCV_ARRAY_RT_WRITE_ENABLE_SMASK);
12980
12981         /* RcvQPMapTable */
12982         for (i = 0; i < 32; i++)
12983                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
12984 }
12985
12986 /*
12987  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
12988  */
12989 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
12990                              u64 ctrl_bits)
12991 {
12992         unsigned long timeout;
12993         u64 reg;
12994
12995         /* is the condition present? */
12996         reg = read_csr(dd, CCE_STATUS);
12997         if ((reg & status_bits) == 0)
12998                 return;
12999
13000         /* clear the condition */
13001         write_csr(dd, CCE_CTRL, ctrl_bits);
13002
13003         /* wait for the condition to clear */
13004         timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13005         while (1) {
13006                 reg = read_csr(dd, CCE_STATUS);
13007                 if ((reg & status_bits) == 0)
13008                         return;
13009                 if (time_after(jiffies, timeout)) {
13010                         dd_dev_err(dd,
13011                                    "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13012                                    status_bits, reg & status_bits);
13013                         return;
13014                 }
13015                 udelay(1);
13016         }
13017 }
13018
13019 /* set CCE CSRs to chip reset defaults */
13020 static void reset_cce_csrs(struct hfi1_devdata *dd)
13021 {
13022         int i;
13023
13024         /* CCE_REVISION read-only */
13025         /* CCE_REVISION2 read-only */
13026         /* CCE_CTRL - bits clear automatically */
13027         /* CCE_STATUS read-only, use CceCtrl to clear */
13028         clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13029         clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13030         clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13031         for (i = 0; i < CCE_NUM_SCRATCH; i++)
13032                 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13033         /* CCE_ERR_STATUS read-only */
13034         write_csr(dd, CCE_ERR_MASK, 0);
13035         write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13036         /* CCE_ERR_FORCE leave alone */
13037         for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13038                 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13039         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13040         /* CCE_PCIE_CTRL leave alone */
13041         for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13042                 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13043                 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13044                           CCE_MSIX_TABLE_UPPER_RESETCSR);
13045         }
13046         for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13047                 /* CCE_MSIX_PBA read-only */
13048                 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13049                 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13050         }
13051         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13052                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13053         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13054                 /* CCE_INT_STATUS read-only */
13055                 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13056                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13057                 /* CCE_INT_FORCE leave alone */
13058                 /* CCE_INT_BLOCKED read-only */
13059         }
13060         for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13061                 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13062 }
13063
13064 /* set MISC CSRs to chip reset defaults */
13065 static void reset_misc_csrs(struct hfi1_devdata *dd)
13066 {
13067         int i;
13068
13069         for (i = 0; i < 32; i++) {
13070                 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13071                 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13072                 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13073         }
13074         /*
13075          * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13076          * only be written in 128-byte chunks
13077          */
13078         /* init RSA engine to clear lingering errors */
13079         write_csr(dd, MISC_CFG_RSA_CMD, 1);
13080         write_csr(dd, MISC_CFG_RSA_MU, 0);
13081         write_csr(dd, MISC_CFG_FW_CTRL, 0);
13082         /* MISC_STS_8051_DIGEST read-only */
13083         /* MISC_STS_SBM_DIGEST read-only */
13084         /* MISC_STS_PCIE_DIGEST read-only */
13085         /* MISC_STS_FAB_DIGEST read-only */
13086         /* MISC_ERR_STATUS read-only */
13087         write_csr(dd, MISC_ERR_MASK, 0);
13088         write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13089         /* MISC_ERR_FORCE leave alone */
13090 }
13091
13092 /* set TXE CSRs to chip reset defaults */
13093 static void reset_txe_csrs(struct hfi1_devdata *dd)
13094 {
13095         int i;
13096
13097         /*
13098          * TXE Kernel CSRs
13099          */
13100         write_csr(dd, SEND_CTRL, 0);
13101         __cm_reset(dd, 0);      /* reset CM internal state */
13102         /* SEND_CONTEXTS read-only */
13103         /* SEND_DMA_ENGINES read-only */
13104         /* SEND_PIO_MEM_SIZE read-only */
13105         /* SEND_DMA_MEM_SIZE read-only */
13106         write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13107         pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
13108         /* SEND_PIO_ERR_STATUS read-only */
13109         write_csr(dd, SEND_PIO_ERR_MASK, 0);
13110         write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13111         /* SEND_PIO_ERR_FORCE leave alone */
13112         /* SEND_DMA_ERR_STATUS read-only */
13113         write_csr(dd, SEND_DMA_ERR_MASK, 0);
13114         write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13115         /* SEND_DMA_ERR_FORCE leave alone */
13116         /* SEND_EGRESS_ERR_STATUS read-only */
13117         write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13118         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13119         /* SEND_EGRESS_ERR_FORCE leave alone */
13120         write_csr(dd, SEND_BTH_QP, 0);
13121         write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13122         write_csr(dd, SEND_SC2VLT0, 0);
13123         write_csr(dd, SEND_SC2VLT1, 0);
13124         write_csr(dd, SEND_SC2VLT2, 0);
13125         write_csr(dd, SEND_SC2VLT3, 0);
13126         write_csr(dd, SEND_LEN_CHECK0, 0);
13127         write_csr(dd, SEND_LEN_CHECK1, 0);
13128         /* SEND_ERR_STATUS read-only */
13129         write_csr(dd, SEND_ERR_MASK, 0);
13130         write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13131         /* SEND_ERR_FORCE read-only */
13132         for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13133                 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13134         for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13135                 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13136         for (i = 0; i < dd->chip_send_contexts / NUM_CONTEXTS_PER_SET; i++)
13137                 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13138         for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13139                 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13140         for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13141                 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13142         write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13143         write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13144         /* SEND_CM_CREDIT_USED_STATUS read-only */
13145         write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13146         write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13147         write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13148         write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13149         write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13150         for (i = 0; i < TXE_NUM_DATA_VL; i++)
13151                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13152         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13153         /* SEND_CM_CREDIT_USED_VL read-only */
13154         /* SEND_CM_CREDIT_USED_VL15 read-only */
13155         /* SEND_EGRESS_CTXT_STATUS read-only */
13156         /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13157         write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13158         /* SEND_EGRESS_ERR_INFO otherwise read-only; the write above clears it */
13159         /* SEND_EGRESS_ERR_SOURCE read-only */
13160
13161         /*
13162          * TXE Per-Context CSRs
13163          */
13164         for (i = 0; i < dd->chip_send_contexts; i++) {
13165                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13166                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13167                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13168                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13169                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13170                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13171                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13172                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13173                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13174                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13175                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13176                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13177         }
13178
13179         /*
13180          * TXE Per-SDMA CSRs
13181          */
13182         for (i = 0; i < dd->chip_sdma_engines; i++) {
13183                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13184                 /* SEND_DMA_STATUS read-only */
13185                 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13186                 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13187                 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13188                 /* SEND_DMA_HEAD read-only */
13189                 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13190                 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13191                 /* SEND_DMA_IDLE_CNT read-only */
13192                 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13193                 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13194                 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13195                 /* SEND_DMA_ENG_ERR_STATUS read-only */
13196                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13197                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13198                 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13199                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13200                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13201                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13202                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13203                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13204                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13205                 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13206         }
13207 }
13208
13209 /*
13210  * Expect on entry:
13211  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13212  */
13213 static void init_rbufs(struct hfi1_devdata *dd)
13214 {
13215         u64 reg;
13216         int count;
13217
13218         /*
13219          * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13220          * clear.
13221          */
13222         count = 0;
13223         while (1) {
13224                 reg = read_csr(dd, RCV_STATUS);
13225                 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13226                             | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13227                         break;
13228                 /*
13229                  * Give up after 1ms (500 iterations of udelay(2) below).
13230                  *
13231                  * RBuf size is 148KiB.  Slowest possible is PCIe Gen1 x1 at
13232                  * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13233                  *      148KiB / (66% * 250MB/s) = 920us
13234                  */
13235                 if (count++ > 500) {
13236                         dd_dev_err(dd,
13237                                    "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13238                                    __func__, reg);
13239                         break;
13240                 }
13241                 udelay(2); /* do not busy-wait the CSR */
13242         }
13243
13244         /* start the init - expect RcvCtrl to be 0 */
13245         write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13246
13247         /*
13248          * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13249          * period after the write before RcvStatus.RxRbufInitDone is valid.
13250          * The delay in the first run through the loop below is sufficient and
13251          * required before the first read of RcvStatus.RxRbufInitDone.
13252          */
13253         read_csr(dd, RCV_CTRL);
13254
13255         /* wait for the init to finish */
13256         count = 0;
13257         while (1) {
13258                 /* delay is required first time through - see above */
13259                 udelay(2); /* do not busy-wait the CSR */
13260                 reg = read_csr(dd, RCV_STATUS);
13261                 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13262                         break;
13263
13264                 /* give up after 100us - slowest possible at 33MHz is 73us */
13265                 if (count++ > 50) {
13266                         dd_dev_err(dd,
13267                                    "%s: RcvStatus.RxRbufInit not set, continuing\n",
13268                                    __func__);
13269                         break;
13270                 }
13271         }
13272 }
13273
13274 /* set RXE CSRs to chip reset defaults */
13275 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13276 {
13277         int i, j;
13278
13279         /*
13280          * RXE Kernel CSRs
13281          */
13282         write_csr(dd, RCV_CTRL, 0);
13283         init_rbufs(dd);
13284         /* RCV_STATUS read-only */
13285         /* RCV_CONTEXTS read-only */
13286         /* RCV_ARRAY_CNT read-only */
13287         /* RCV_BUF_SIZE read-only */
13288         write_csr(dd, RCV_BTH_QP, 0);
13289         write_csr(dd, RCV_MULTICAST, 0);
13290         write_csr(dd, RCV_BYPASS, 0);
13291         write_csr(dd, RCV_VL15, 0);
13292         /* this is a clear-down */
13293         write_csr(dd, RCV_ERR_INFO,
13294                   RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13295         /* RCV_ERR_STATUS read-only */
13296         write_csr(dd, RCV_ERR_MASK, 0);
13297         write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13298         /* RCV_ERR_FORCE leave alone */
13299         for (i = 0; i < 32; i++)
13300                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13301         for (i = 0; i < 4; i++)
13302                 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13303         for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13304                 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13305         for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13306                 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13307         for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++) {
13308                 write_csr(dd, RCV_RSM_CFG + (8 * i), 0);
13309                 write_csr(dd, RCV_RSM_SELECT + (8 * i), 0);
13310                 write_csr(dd, RCV_RSM_MATCH + (8 * i), 0);
13311         }
13312         for (i = 0; i < 32; i++)
13313                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13314
13315         /*
13316          * RXE Kernel and User Per-Context CSRs
13317          */
13318         for (i = 0; i < dd->chip_rcv_contexts; i++) {
13319                 /* kernel */
13320                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13321                 /* RCV_CTXT_STATUS read-only */
13322                 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13323                 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13324                 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13325                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13326                 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13327                 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13328                 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13329                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13330                 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13331                 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13332
13333                 /* user */
13334                 /* RCV_HDR_TAIL read-only */
13335                 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13336                 /* RCV_EGR_INDEX_TAIL read-only */
13337                 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13338                 /* RCV_EGR_OFFSET_TAIL read-only */
13339                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13340                         write_uctxt_csr(dd, i,
13341                                         RCV_TID_FLOW_TABLE + (8 * j), 0);
13342                 }
13343         }
13344 }
13345
13346 /*
13347  * Set sc2vl tables.
13348  *
13349  * They power on to zeros, so to avoid send context errors
13350  * they need to be set:
13351  *
13352  * SC 0-7 -> VL 0-7 (respectively)
13353  * SC 15  -> VL 15
13354  * otherwise
13355  *        -> VL 0
13356  */
13357 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13358 {
13359         int i;
13360         /* init per architecture spec, constrained by hardware capability */
13361
13362         /* HFI maps sent packets */
13363         write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13364                 0,
13365                 0, 0, 1, 1,
13366                 2, 2, 3, 3,
13367                 4, 4, 5, 5,
13368                 6, 6, 7, 7));
13369         write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13370                 1,
13371                 8, 0, 9, 0,
13372                 10, 0, 11, 0,
13373                 12, 0, 13, 0,
13374                 14, 0, 15, 15));
13375         write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13376                 2,
13377                 16, 0, 17, 0,
13378                 18, 0, 19, 0,
13379                 20, 0, 21, 0,
13380                 22, 0, 23, 0));
13381         write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13382                 3,
13383                 24, 0, 25, 0,
13384                 26, 0, 27, 0,
13385                 28, 0, 29, 0,
13386                 30, 0, 31, 0));
13387
13388         /* DC maps received packets */
13389         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13390                 15_0,
13391                 0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13392                 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13393         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13394                 31_16,
13395                 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13396                 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13397
13398         /* initialize the cached sc2vl values consistently with h/w */
13399         for (i = 0; i < 32; i++) {
13400                 if (i < 8 || i == 15)
13401                         *((u8 *)(dd->sc2vl) + i) = (u8)i;
13402                 else
13403                         *((u8 *)(dd->sc2vl) + i) = 0;
13404         }
13405 }
13406
13407 /*
13408  * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13409  * depend on the chip going through a power-on reset - a driver may be loaded
13410  * and unloaded many times.
13411  *
13412  * Do not write any CSR values to the chip in this routine - there may be
13413  * a reset following the (possible) FLR in this routine.
13414  */
13416 static void init_chip(struct hfi1_devdata *dd)
13417 {
13418         int i;
13419
13420         /*
13421          * Put the HFI CSRs in a known state.
13422          * Combine this with a DC reset.
13423          *
13424          * Stop the device from doing anything while we do a
13425          * reset.  We know there are no other active users of
13426          * the device since we are now in charge.  Turn off
13427          * all outbound and inbound traffic and make sure
13428          * the device does not generate any interrupts.
13429          */
13430
13431         /* disable send contexts and SDMA engines */
13432         write_csr(dd, SEND_CTRL, 0);
13433         for (i = 0; i < dd->chip_send_contexts; i++)
13434                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13435         for (i = 0; i < dd->chip_sdma_engines; i++)
13436                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13437         /* disable port (turn off RXE inbound traffic) and contexts */
13438         write_csr(dd, RCV_CTRL, 0);
13439         for (i = 0; i < dd->chip_rcv_contexts; i++)
13440                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13441         /* mask all interrupt sources */
13442         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13443                 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13444
13445         /*
13446          * DC Reset: do a full DC reset before the register clear.
13447          * A recommended length of time to hold is one CSR read,
13448          * so reread the CceDcCtrl.  Then, hold the DC in reset
13449          * across the clear.
13450          */
13451         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13452         (void)read_csr(dd, CCE_DC_CTRL);
13453
13454         if (use_flr) {
13455                 /*
13456                  * A FLR will reset the SPC core and part of the PCIe.
13457                  * The parts that need to be restored have already been
13458                  * saved.
13459                  */
13460                 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13461
13462                 /* do the FLR, the DC reset will remain */
13463                 hfi1_pcie_flr(dd);
13464
13465                 /* restore command and BARs */
13466                 restore_pci_variables(dd);
13467
13468                 if (is_ax(dd)) {
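                        /* A0 parts need a second FLR; repeat and restore */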
13469                         dd_dev_info(dd, "Resetting CSRs with FLR\n");
13470                         hfi1_pcie_flr(dd);
13471                         restore_pci_variables(dd);
13472                 }
13473         } else {
13474                 dd_dev_info(dd, "Resetting CSRs with writes\n");
13475                 reset_cce_csrs(dd);
13476                 reset_txe_csrs(dd);
13477                 reset_rxe_csrs(dd);
13478                 reset_misc_csrs(dd);
13479         }
13480         /* clear the DC reset */
13481         write_csr(dd, CCE_DC_CTRL, 0);
13482
13483         /* Set the LED off */
13484         setextled(dd, 0);
13485
13486         /*
13487          * Clear the QSFP reset.
13488          * An FLR enforces a 0 on all out pins. The driver does not touch
13489          * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low, holding
13490          * anything plugged in constantly in reset if it pays attention
13491          * to RESET_N.
13492          * Prime examples of this are optical cables. Set all pins high.
13493          * I2CCLK and I2CDAT will change per direction, and INT_N and
13494          * MODPRS_N are input only and their value is ignored.
13495          */
13496         write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13497         write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13498         init_chip_resources(dd);
13499 }
13500
13501 static void init_early_variables(struct hfi1_devdata *dd)
13502 {
13503         int i;
13504
13505         /* assign link credit variables */
13506         dd->vau = CM_VAU;
13507         dd->link_credits = CM_GLOBAL_CREDITS;
13508         if (is_ax(dd))
13509                 dd->link_credits--;
13510         dd->vcu = cu_to_vcu(hfi1_cu);
13511         /* enough room for 8 MAD packets plus header - 17K */
13512         dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
13513         if (dd->vl15_init > dd->link_credits)
13514                 dd->vl15_init = dd->link_credits;
13515
13516         write_uninitialized_csrs_and_memories(dd);
13517
13518         if (HFI1_CAP_IS_KSET(PKEY_CHECK))
13519                 for (i = 0; i < dd->num_pports; i++) {
13520                         struct hfi1_pportdata *ppd = &dd->pport[i];
13521
13522                         set_partition_keys(ppd);
13523                 }
13524         init_sc2vl_tables(dd);
13525 }
13526
13527 static void init_kdeth_qp(struct hfi1_devdata *dd)
13528 {
13529         /* user changed the KDETH_QP */
13530         if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
13531                 /* out of range or illegal value */
13532                 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
13533                 kdeth_qp = 0;
13534         }
13535         if (kdeth_qp == 0)      /* not set, or failed range check */
13536                 kdeth_qp = DEFAULT_KDETH_QP;
13537
13538         write_csr(dd, SEND_BTH_QP,
13539                   (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
13540                   SEND_BTH_QP_KDETH_QP_SHIFT);
13541
13542         write_csr(dd, RCV_BTH_QP,
13543                   (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
13544                   RCV_BTH_QP_KDETH_QP_SHIFT);
13545 }
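
/*
 * Illustrative: assuming the prefix is compared against QP[23:16],
 * a prefix of 0x80 makes every BTH QPN in 0x800000..0x80ffff a
 * KDETH queue pair on both the send and receive side.
 */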
13546
13547 /**
13548  * init_qpmap_table
13549  * @dd - device data
13550  * @first_ctxt - first context
13551  * @last_ctxt - last context
13552  *
13553  * This routine sets the qpn mapping table that
13554  * is indexed by qpn[8:1].
13555  *
13556  * The routine will round robin the 256 settings
13557  * from first_ctxt to last_ctxt.
13558  *
13559  * The first/last looks ahead to having specialized
13560  * receive contexts for mgmt and bypass.  Normal
13561  * verbs traffic is assumed to be on a range
13562  * of receive contexts.
13563  */
13564 static void init_qpmap_table(struct hfi1_devdata *dd,
13565                              u32 first_ctxt,
13566                              u32 last_ctxt)
13567 {
13568         u64 reg = 0;
13569         u64 regno = RCV_QP_MAP_TABLE;
13570         int i;
13571         u64 ctxt = first_ctxt;
13572
13573         for (i = 0; i < 256; i++) {
13574                 reg |= ctxt << (8 * (i % 8));
13575                 ctxt++;
13576                 if (ctxt > last_ctxt)
13577                         ctxt = first_ctxt;
13578                 if (i % 8 == 7) {
13579                         write_csr(dd, regno, reg);
13580                         reg = 0;
13581                         regno += 8;
13582                 }
13583         }
13584
13585         add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
13586                         | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
13587 }
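
/*
 * Worked example: init_qpmap_table(dd, 0, 2) fills the 256 one-byte
 * entries with 0, 1, 2, 0, 1, 2, ..., so qpn[8:1] == 0 selects
 * context 0 and qpn[8:1] == 3 wraps back to context 0.  Eight
 * entries pack into each 8-byte RcvQpMapTable CSR, hence the write
 * at every i % 8 == 7 and the 32 (256/8) registers.
 */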
13588
13589 struct rsm_map_table {
13590         u64 map[NUM_MAP_REGS];
13591         unsigned int used;
13592 };
13593
13594 struct rsm_rule_data {
13595         u8 offset;
13596         u8 pkt_type;
13597         u32 field1_off;
13598         u32 field2_off;
13599         u32 index1_off;
13600         u32 index1_width;
13601         u32 index2_off;
13602         u32 index2_width;
13603         u32 mask1;
13604         u32 value1;
13605         u32 mask2;
13606         u32 value2;
13607 };
13608
13609 /*
13610  * Return an initialized RMT map table for users to fill in.  OK if it
13611  * returns NULL, indicating no table.
13612  */
13613 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
13614 {
13615         struct rsm_map_table *rmt;
13616         u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is the default on A0 */
13617
13618         rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
13619         if (rmt) {
13620                 memset(rmt->map, rxcontext, sizeof(rmt->map));
13621                 rmt->used = 0;
13622         }
13623
13624         return rmt;
13625 }
13626
13627 /*
13628  * Write the final RMT map table to the chip and free the table.  OK if
13629  * table is NULL.
13630  */
13631 static void complete_rsm_map_table(struct hfi1_devdata *dd,
13632                                    struct rsm_map_table *rmt)
13633 {
13634         int i;
13635
13636         if (rmt) {
13637                 /* write table to chip */
13638                 for (i = 0; i < NUM_MAP_REGS; i++)
13639                         write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
13640
13641                 /* enable RSM */
13642                 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
13643         }
13644 }
13645
13646 /*
13647  * Add a receive side mapping rule.
13648  */
13649 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
13650                          struct rsm_rule_data *rrd)
13651 {
13652         write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
13653                   (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
13654                   1ull << rule_index | /* enable bit */
13655                   (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
13656         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
13657                   (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
13658                   (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
13659                   (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
13660                   (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
13661                   (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
13662                   (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
13663         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
13664                   (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
13665                   (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
13666                   (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
13667                   (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
13668 }
13669
13670 /* return the number of RSM map table entries that will be used for QOS */
13671 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
13672                            unsigned int *np)
13673 {
13674         int i;
13675         unsigned int m, n;
13676         u8 max_by_vl = 0;
13677
13678         /* is QOS active at all? */
13679         if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
13680             num_vls == 1 ||
13681             krcvqsset <= 1)
13682                 goto no_qos;
13683
13684         /* determine bits for qpn */
13685         for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
13686                 if (krcvqs[i] > max_by_vl)
13687                         max_by_vl = krcvqs[i];
13688         if (max_by_vl > 32)
13689                 goto no_qos;
13690         m = ilog2(__roundup_pow_of_two(max_by_vl));
13691
13692         /* determine bits for vl */
13693         n = ilog2(__roundup_pow_of_two(num_vls));
13694
13695         /* reject if too much is used */
13696         if ((m + n) > 7)
13697                 goto no_qos;
13698
13699         if (mp)
13700                 *mp = m;
13701         if (np)
13702                 *np = n;
13703
13704         return 1 << (m + n);
13705
13706 no_qos:
13707         if (mp)
13708                 *mp = 0;
13709         if (np)
13710                 *np = 0;
13711         return 0;
13712 }
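
/*
 * Worked example: num_vls == 2 and krcvqs[] == { 2, 2 } gives
 * max_by_vl == 2, so m = ilog2(2) = 1 qpn bit and n = ilog2(2) = 1
 * vl bit.  m + n == 2 <= 7, so QOS is usable and 1 << 2 == 4 RSM
 * map table entries will be consumed.
 */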
13713
13714 /**
13715  * init_qos - init RX qos
13716  * @dd - device data
13717  * @rmt - RSM map table
13718  *
13719  * This routine initializes Rule 0 and the RSM map table to implement
13720  * quality of service (qos).
13721  *
13722  * If all of the limit tests succeed, qos is applied based on the array
13723  * interpretation of krcvqs where entry 0 is VL0.
13724  *
13725  * The number of vl bits (n) and the number of qpn bits (m) are computed to
13726  * feed both the RSM map table and the single rule.
13727  */
13728 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
13729 {
13730         struct rsm_rule_data rrd;
13731         unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
13732         unsigned int rmt_entries;
13733         u64 reg;
13734
13735         if (!rmt)
13736                 goto bail;
13737         rmt_entries = qos_rmt_entries(dd, &m, &n);
13738         if (rmt_entries == 0)
13739                 goto bail;
13740         qpns_per_vl = 1 << m;
13741
13742         /* enough room in the map table? */
13743         rmt_entries = 1 << (m + n);
13744         if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
13745                 goto bail;
13746
13747         /* add qos entries to the RSM map table */
13748         for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
13749                 unsigned tctxt;
13750
13751                 for (qpn = 0, tctxt = ctxt;
13752                      krcvqs[i] && qpn < qpns_per_vl; qpn++) {
13753                         unsigned idx, regoff, regidx;
13754
13755                         /* generate the index the hardware will produce */
13756                         idx = rmt->used + ((qpn << n) ^ i);
13757                         regoff = (idx % 8) * 8;
13758                         regidx = idx / 8;
13759                         /* replace default with context number */
13760                         reg = rmt->map[regidx];
13761                         reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
13762                                 << regoff);
13763                         reg |= (u64)(tctxt++) << regoff;
13764                         rmt->map[regidx] = reg;
13765                         if (tctxt == ctxt + krcvqs[i])
13766                                 tctxt = ctxt;
13767                 }
13768                 ctxt += krcvqs[i];
13769         }
13770
13771         rrd.offset = rmt->used;
13772         rrd.pkt_type = 2;
13773         rrd.field1_off = LRH_BTH_MATCH_OFFSET;
13774         rrd.field2_off = LRH_SC_MATCH_OFFSET;
13775         rrd.index1_off = LRH_SC_SELECT_OFFSET;
13776         rrd.index1_width = n;
13777         rrd.index2_off = QPN_SELECT_OFFSET;
13778         rrd.index2_width = m + n;
13779         rrd.mask1 = LRH_BTH_MASK;
13780         rrd.value1 = LRH_BTH_VALUE;
13781         rrd.mask2 = LRH_SC_MASK;
13782         rrd.value2 = LRH_SC_VALUE;
13783
13784         /* add rule 0 */
13785         add_rsm_rule(dd, 0, &rrd);
13786
13787         /* mark RSM map entries as used */
13788         rmt->used += rmt_entries;
13789         /* map everything else to the mcast/err/vl15 context */
13790         init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
13791         dd->qos_shift = n + 1;
13792         return;
13793 bail:
13794         dd->qos_shift = 1;
13795         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
13796 }
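
/*
 * Continuing the worked example above (m == 1, n == 1, rmt->used == 0):
 * the hardware produces index (qpn << n) ^ vl, so VL0's two qpn
 * buckets land in map entries 0 and 2 and VL1's in entries 1 and 3,
 * each bucket round-robined across that VL's krcvqs[] contexts.
 * qos_shift becomes n + 1 == 2.
 */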
13797
13798 static void init_user_fecn_handling(struct hfi1_devdata *dd,
13799                                     struct rsm_map_table *rmt)
13800 {
13801         struct rsm_rule_data rrd;
13802         u64 reg;
13803         int i, idx, regoff, regidx;
13804         u8 offset;
13805
13806         /* there needs to be enough room in the map table */
13807         if (rmt->used + dd->num_user_contexts >= NUM_MAP_ENTRIES) {
13808                 dd_dev_err(dd, "User FECN handling disabled - too many user contexts allocated\n");
13809                 return;
13810         }
13811
13812         /*
13813          * RSM will extract the destination context as an index into the
13814          * map table.  The destination contexts are a sequential block
13815          * in the range first_user_ctxt...num_rcv_contexts-1 (inclusive).
13816          * Map entries are accessed as offset + extracted value.  Adjust
13817          * the added offset so this sequence can be placed anywhere in
13818          * the table - as long as the entries themselves do not wrap.
13819          * There are only enough bits in offset for the table size, so
13820          * start with that to allow for a "negative" offset.
13821          */
13822         offset = (u8)(NUM_MAP_ENTRIES + (int)rmt->used -
13823                                                 (int)dd->first_user_ctxt);
13824
13825         for (i = dd->first_user_ctxt, idx = rmt->used;
13826                                 i < dd->num_rcv_contexts; i++, idx++) {
13827                 /* replace with identity mapping */
13828                 regoff = (idx % 8) * 8;
13829                 regidx = idx / 8;
13830                 reg = rmt->map[regidx];
13831                 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
13832                 reg |= (u64)i << regoff;
13833                 rmt->map[regidx] = reg;
13834         }
13835
13836         /*
13837          * For RSM intercept of Expected FECN packets:
13838          * o packet type 0 - expected
13839          * o match on F (bit 95), using select/match 1, and
13840          * o match on SH (bit 133), using select/match 2.
13841          *
13842          * Use index 1 to extract the 8-bit receive context from DestQP
13843          * (start at bit 64).  Use that as the RSM map table index.
13844          */
13845         rrd.offset = offset;
13846         rrd.pkt_type = 0;
13847         rrd.field1_off = 95;
13848         rrd.field2_off = 133;
13849         rrd.index1_off = 64;
13850         rrd.index1_width = 8;
13851         rrd.index2_off = 0;
13852         rrd.index2_width = 0;
13853         rrd.mask1 = 1;
13854         rrd.value1 = 1;
13855         rrd.mask2 = 1;
13856         rrd.value2 = 1;
13857
13858         /* add rule 1 */
13859         add_rsm_rule(dd, 1, &rrd);
13860
13861         rmt->used += dd->num_user_contexts;
13862 }
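
/*
 * Offset example (illustrative; assumes NUM_MAP_ENTRIES == 256):
 * with rmt->used == 20 and first_user_ctxt == 8, offset is
 * (u8)(256 + 20 - 8) == 12, so extracted context 8 selects map
 * entry 12 + 8 == 20 -- exactly the first entry claimed above.
 */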
13863
13864 static void init_rxe(struct hfi1_devdata *dd)
13865 {
13866         struct rsm_map_table *rmt;
13867
13868         /* enable all receive errors */
13869         write_csr(dd, RCV_ERR_MASK, ~0ull);
13870
13871         rmt = alloc_rsm_map_table(dd);
13872         /* set up QOS, including the QPN map table */
13873         init_qos(dd, rmt);
13874         init_user_fecn_handling(dd, rmt);
13875         complete_rsm_map_table(dd, rmt);
13876         kfree(rmt);
13877
13878         /*
13879          * make sure RcvCtrl.RcvWcb <= PCIe Device Control
13880          * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
13881          * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
13882          * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
13883          * Max_PayLoad_Size set to its minimum of 128.
13884          *
13885          * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
13886          * (64 bytes).  Max_Payload_Size is possibly modified upward in
13887          * tune_pcie_caps() which is called after this routine.
13888          */
13889 }
13890
13891 static void init_other(struct hfi1_devdata *dd)
13892 {
13893         /* enable all CCE errors */
13894         write_csr(dd, CCE_ERR_MASK, ~0ull);
13895         /* enable *some* Misc errors */
13896         write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
13897         /* enable all DC errors, except LCB */
13898         write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
13899         write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
13900 }
13901
13902 /*
13903  * Fill out the given AU table using the given CU.  A CU is defined in terms
13904  * of AUs.  The table is an encoding: given the index, how many AUs does that
13905  * represent?
13906  *
13907  * NOTE: Assumes that the register layout is the same for the
13908  * local and remote tables.
13909  */
13910 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
13911                                u32 csr0to3, u32 csr4to7)
13912 {
13913         write_csr(dd, csr0to3,
13914                   0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
13915                   1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
13916                   2ull * cu <<
13917                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
13918                   4ull * cu <<
13919                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
13920         write_csr(dd, csr4to7,
13921                   8ull * cu <<
13922                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
13923                   16ull * cu <<
13924                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
13925                   32ull * cu <<
13926                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
13927                   64ull * cu <<
13928                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
13929 }
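
/*
 * Example: with cu == 1 the table encodes { 0, 1, 2, 4, 8, 16, 32,
 * 64 } AUs for indices 0..7.  A larger CU scales entries 2 and up;
 * entries 0 and 1 are fixed at 0 and 1 AU.
 */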
13930
13931 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13932 {
13933         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
13934                            SEND_CM_LOCAL_AU_TABLE4_TO7);
13935 }
13936
13937 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
13938 {
13939         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
13940                            SEND_CM_REMOTE_AU_TABLE4_TO7);
13941 }
13942
13943 static void init_txe(struct hfi1_devdata *dd)
13944 {
13945         int i;
13946
13947         /* enable all PIO, SDMA, general, and Egress errors */
13948         write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
13949         write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
13950         write_csr(dd, SEND_ERR_MASK, ~0ull);
13951         write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
13952
13953         /* enable all per-context and per-SDMA engine errors */
13954         for (i = 0; i < dd->chip_send_contexts; i++)
13955                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
13956         for (i = 0; i < dd->chip_sdma_engines; i++)
13957                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
13958
13959         /* set the local CU to AU mapping */
13960         assign_local_cm_au_table(dd, dd->vcu);
13961
13962         /*
13963          * Set reasonable default for Credit Return Timer
13964          * Don't set on Simulator - causes it to choke.
13965          */
13966         if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
13967                 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
13968 }
13969
13970 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt, u16 jkey)
13971 {
13972         struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
13973         unsigned sctxt;
13974         int ret = 0;
13975         u64 reg;
13976
13977         if (!rcd || !rcd->sc) {
13978                 ret = -EINVAL;
13979                 goto done;
13980         }
13981         sctxt = rcd->sc->hw_context;
13982         reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
13983                 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
13984                  SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
13985         /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
13986         if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
13987                 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
13988         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
13989         /*
13990          * Enable send-side J_KEY integrity check, unless this is A0 h/w
13991          */
13992         if (!is_ax(dd)) {
13993                 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
13994                 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
13995                 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
13996         }
13997
13998         /* Enable J_KEY check on receive context. */
13999         reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14000                 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14001                  RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14002         write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, reg);
14003 done:
14004         return ret;
14005 }
14006
14007 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, unsigned ctxt)
14008 {
14009         struct hfi1_ctxtdata *rcd = dd->rcd[ctxt];
14010         unsigned sctxt;
14011         int ret = 0;
14012         u64 reg;
14013
14014         if (!rcd || !rcd->sc) {
14015                 ret = -EINVAL;
14016                 goto done;
14017         }
14018         sctxt = rcd->sc->hw_context;
14019         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14020         /*
14021          * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14022          * This check would not have been enabled for A0 h/w, see
14023          * set_ctxt_jkey().
14024          */
14025         if (!is_ax(dd)) {
14026                 reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14027                 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14028                 write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14029         }
14030         /* Turn off the J_KEY on the receive side */
14031         write_kctxt_csr(dd, ctxt, RCV_KEY_CTRL, 0);
14032 done:
14033         return ret;
14034 }
14035
14036 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt, u16 pkey)
14037 {
14038         struct hfi1_ctxtdata *rcd;
14039         unsigned sctxt;
14040         int ret = 0;
14041         u64 reg;
14042
14043         if (ctxt < dd->num_rcv_contexts) {
14044                 rcd = dd->rcd[ctxt];
14045         } else {
14046                 ret = -EINVAL;
14047                 goto done;
14048         }
14049         if (!rcd || !rcd->sc) {
14050                 ret = -EINVAL;
14051                 goto done;
14052         }
14053         sctxt = rcd->sc->hw_context;
14054         reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14055                 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14056         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14057         reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14058         reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14059         reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14060         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14061 done:
14062         return ret;
14063 }
14064
14065 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, unsigned ctxt)
14066 {
14067         struct hfi1_ctxtdata *rcd;
14068         unsigned sctxt;
14069         int ret = 0;
14070         u64 reg;
14071
14072         if (ctxt < dd->num_rcv_contexts) {
14073                 rcd = dd->rcd[ctxt];
14074         } else {
14075                 ret = -EINVAL;
14076                 goto done;
14077         }
14078         if (!rcd || !rcd->sc) {
14079                 ret = -EINVAL;
14080                 goto done;
14081         }
14082         sctxt = rcd->sc->hw_context;
14083         reg = read_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE);
14084         reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14085         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_ENABLE, reg);
14086         write_kctxt_csr(dd, sctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14087 done:
14088         return ret;
14089 }
14090
14091 /*
14092  * Start doing the clean up of the chip. Our clean up happens in multiple
14093  * stages and this is just the first.
14094  */
14095 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14096 {
14097         aspm_exit(dd);
14098         free_cntrs(dd);
14099         free_rcverr(dd);
14100         clean_up_interrupts(dd);
14101         finish_chip_resources(dd);
14102 }
14103
14104 #define HFI_BASE_GUID(dev) \
14105         ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
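
/*
 * Illustrative: the two HFIs on one ASIC are expected to share a GUID
 * that differs only in the bit at GUID_HFI_INDEX_SHIFT, so
 * HFI_BASE_GUID() yields the same value for both and is used below to
 * find the peer device.
 */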
14106
14107 /*
14108  * Information can be shared between the two HFIs on the same ASIC
14109  * in the same OS.  This function finds the peer device and sets
14110  * up a shared structure.
14111  */
14112 static int init_asic_data(struct hfi1_devdata *dd)
14113 {
14114         unsigned long flags;
14115         struct hfi1_devdata *tmp, *peer = NULL;
14116         struct hfi1_asic_data *asic_data;
14117         int ret = 0;
14118
14119         /* pre-allocate the asic structure in case we are the first device */
14120         asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14121         if (!asic_data)
14122                 return -ENOMEM;
14123
14124         spin_lock_irqsave(&hfi1_devs_lock, flags);
14125         /* Find our peer device */
14126         list_for_each_entry(tmp, &hfi1_dev_list, list) {
14127                 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(tmp)) &&
14128                     dd->unit != tmp->unit) {
14129                         peer = tmp;
14130                         break;
14131                 }
14132         }
14133
14134         if (peer) {
14135                 /* use already allocated structure */
14136                 dd->asic_data = peer->asic_data;
14137                 kfree(asic_data);
14138         } else {
14139                 dd->asic_data = asic_data;
14140                 mutex_init(&dd->asic_data->asic_resource_mutex);
14141         }
14142         dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14143         spin_unlock_irqrestore(&hfi1_devs_lock, flags);
14144         return ret;
14145 }
14146
14147 /*
14148  * Set dd->boardname.  Use a generic name if a name is not returned from
14149  * EFI variable space.
14150  *
14151  * Return 0 on success, -ENOMEM if space could not be allocated.
14152  */
14153 static int obtain_boardname(struct hfi1_devdata *dd)
14154 {
14155         /* generic board description */
14156         const char generic[] =
14157                 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14158         unsigned long size;
14159         int ret;
14160
14161         ret = read_hfi1_efi_var(dd, "description", &size,
14162                                 (void **)&dd->boardname);
14163         if (ret) {
14164                 dd_dev_info(dd, "Board description not found\n");
14165                 /* use generic description */
14166                 dd->boardname = kstrdup(generic, GFP_KERNEL);
14167                 if (!dd->boardname)
14168                         return -ENOMEM;
14169         }
14170         return 0;
14171 }
14172
14173 /*
14174  * Check the interrupt registers to make sure that they are mapped correctly.
14175  * It is intended to help the user identify any mismapping by the VMM when
14176  * the driver is running in a VM. This function should only be called before
14177  * interrupts are set up properly.
14178  *
14179  * Return 0 on success, -EINVAL on failure.
14180  */
14181 static int check_int_registers(struct hfi1_devdata *dd)
14182 {
14183         u64 reg;
14184         u64 all_bits = ~(u64)0;
14185         u64 mask;
14186
14187         /* Clear CceIntMask[0] to avoid raising any interrupts */
14188         mask = read_csr(dd, CCE_INT_MASK);
14189         write_csr(dd, CCE_INT_MASK, 0ull);
14190         reg = read_csr(dd, CCE_INT_MASK);
14191         if (reg)
14192                 goto err_exit;
14193
14194         /* Clear all interrupt status bits */
14195         write_csr(dd, CCE_INT_CLEAR, all_bits);
14196         reg = read_csr(dd, CCE_INT_STATUS);
14197         if (reg)
14198                 goto err_exit;
14199
14200         /* Set all interrupt status bits */
14201         write_csr(dd, CCE_INT_FORCE, all_bits);
14202         reg = read_csr(dd, CCE_INT_STATUS);
14203         if (reg != all_bits)
14204                 goto err_exit;
14205
14206         /* Restore the interrupt mask */
14207         write_csr(dd, CCE_INT_CLEAR, all_bits);
14208         write_csr(dd, CCE_INT_MASK, mask);
14209
14210         return 0;
14211 err_exit:
14212         write_csr(dd, CCE_INT_MASK, mask);
14213         dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14214         return -EINVAL;
14215 }
14216
14217 /**
14218  * Allocate and initialize the device structure for the hfi.
14219  * @pdev: the pci_dev for hfi1_ib device
14220  * @ent: pci_device_id struct for this dev
14221  *
14222  * Also allocates, initializes, and returns the devdata struct for this
14223  * device instance
14224  *
14225  * This is global, and is called directly at init to set up the
14226  * chip-specific function pointers for later use.
14227  */
14228 struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
14229                                   const struct pci_device_id *ent)
14230 {
14231         struct hfi1_devdata *dd;
14232         struct hfi1_pportdata *ppd;
14233         u64 reg;
14234         int i, ret;
14235         static const char * const inames[] = { /* implementation names */
14236                 "RTL silicon",
14237                 "RTL VCS simulation",
14238                 "RTL FPGA emulation",
14239                 "Functional simulator"
14240         };
14241         struct pci_dev *parent = pdev->bus->self;
14242
14243         dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
14244                                 sizeof(struct hfi1_pportdata));
14245         if (IS_ERR(dd))
14246                 goto bail;
14247         ppd = dd->pport;
14248         for (i = 0; i < dd->num_pports; i++, ppd++) {
14249                 int vl;
14250                 /* init common fields */
14251                 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14252                 /* DC supports 4 link widths */
14253                 ppd->link_width_supported =
14254                         OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14255                         OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14256                 ppd->link_width_downgrade_supported =
14257                         ppd->link_width_supported;
14258                 /* start out enabling only 4X */
14259                 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14260                 ppd->link_width_downgrade_enabled =
14261                                         ppd->link_width_downgrade_supported;
14262                 /* link width active is 0 when link is down */
14263                 /* link width downgrade active is 0 when link is down */
14264
14265                 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14266                     num_vls > HFI1_MAX_VLS_SUPPORTED) {
14267                         hfi1_early_err(&pdev->dev,
14268                                        "Invalid num_vls %u, using %u VLs\n",
14269                                        num_vls, HFI1_MAX_VLS_SUPPORTED);
14270                         num_vls = HFI1_MAX_VLS_SUPPORTED;
14271                 }
14272                 ppd->vls_supported = num_vls;
14273                 ppd->vls_operational = ppd->vls_supported;
14274                 ppd->actual_vls_operational = ppd->vls_supported;
14275                 /* Set the default MTU. */
14276                 for (vl = 0; vl < num_vls; vl++)
14277                         dd->vld[vl].mtu = hfi1_max_mtu;
14278                 dd->vld[15].mtu = MAX_MAD_PACKET;
14279                 /*
14280                  * Set the initial values to reasonable defaults; they will be set
14281                  * for real when link is up.
14282                  */
14283                 ppd->lstate = IB_PORT_DOWN;
14284                 ppd->overrun_threshold = 0x4;
14285                 ppd->phy_error_threshold = 0xf;
14286                 ppd->port_crc_mode_enabled = link_crc_mask;
14287                 /* initialize supported LTP CRC mode */
14288                 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14289                 /* initialize enabled LTP CRC mode */
14290                 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
                /* start in offline */
                ppd->host_link_state = HLS_DN_OFFLINE;
                init_vl_arb_caches(ppd);
                ppd->last_pstate = 0xff; /* invalid value */
        }

        dd->link_default = HLS_DN_POLL;

        /*
         * Do remaining PCIe setup and save PCIe values in dd.
         * Any error printing is already done by the init code.
         * On return, we have the chip mapped.
         */
        ret = hfi1_pcie_ddinit(dd, pdev, ent);
        if (ret < 0)
                goto bail_free;

        /* verify that reads actually work, save revision for reset check */
        dd->revision = read_csr(dd, CCE_REVISION);
        if (dd->revision == ~(u64)0) {
                dd_dev_err(dd, "cannot read chip CSRs\n");
                ret = -EINVAL;
                goto bail_cleanup;
        }
        dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
                        & CCE_REVISION_CHIP_REV_MAJOR_MASK;
        dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
                        & CCE_REVISION_CHIP_REV_MINOR_MASK;

        /*
         * Check interrupt registers mapping if the driver has no access to
         * the upstream component. In this case, it is likely that the driver
         * is running in a VM.
         */
        if (!parent) {
                ret = check_int_registers(dd);
                if (ret)
                        goto bail_cleanup;
        }

        /*
         * obtain the hardware ID - NOT related to unit, which is a
         * software enumeration
         */
        reg = read_csr(dd, CCE_REVISION2);
        dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
                                        & CCE_REVISION2_HFI_ID_MASK;
        /* icode and irev are narrower than the register; the assignment
         * truncates the unwanted high bits
         */
        dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
        dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
        dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
                    dd->icode < ARRAY_SIZE(inames) ?
                    inames[dd->icode] : "unknown", (int)dd->irev);

        /* speeds the hardware can support */
        dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
        /* speeds allowed to run at */
        dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
        /* give a reasonable active value, will be set on link up */
        dd->pport->link_speed_active = OPA_LINK_SPEED_25G;

        dd->chip_rcv_contexts = read_csr(dd, RCV_CONTEXTS);
        dd->chip_send_contexts = read_csr(dd, SEND_CONTEXTS);
        dd->chip_sdma_engines = read_csr(dd, SEND_DMA_ENGINES);
        dd->chip_pio_mem_size = read_csr(dd, SEND_PIO_MEM_SIZE);
        dd->chip_sdma_mem_size = read_csr(dd, SEND_DMA_MEM_SIZE);
        /* fix up link widths for emulation _p */
        ppd = dd->pport;
        if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
                ppd->link_width_supported =
                        ppd->link_width_enabled =
                        ppd->link_width_downgrade_supported =
                        ppd->link_width_downgrade_enabled =
                                OPA_LINK_WIDTH_1X;
        }
        /* ensure num_vls isn't larger than number of sdma engines */
        if (HFI1_CAP_IS_KSET(SDMA) && num_vls > dd->chip_sdma_engines) {
                dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
                           num_vls, dd->chip_sdma_engines);
                num_vls = dd->chip_sdma_engines;
                ppd->vls_supported = dd->chip_sdma_engines;
                ppd->vls_operational = ppd->vls_supported;
        }

        /*
         * Convert the ns parameter to the 64 * cclocks used in the CSR.
         * Limit the max if larger than the field holds.  If timeout is
         * non-zero, then the calculated field will be at least 1.
         *
         * Must be after icode is set up - the cclock rate depends
         * on knowing the hardware being used.
         */
        dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
        if (dd->rcv_intr_timeout_csr >
                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
                dd->rcv_intr_timeout_csr =
                        RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
        else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
                dd->rcv_intr_timeout_csr = 1;

        /* needs to be done before we look for the peer device */
        read_guid(dd);

        /* set up shared ASIC data with peer device */
        ret = init_asic_data(dd);
        if (ret)
                goto bail_cleanup;

        /* obtain chip sizes, reset chip CSRs */
        init_chip(dd);

        /* read in the PCIe link speed information */
        ret = pcie_speeds(dd);
        if (ret)
                goto bail_cleanup;

        /* Needs to be called before hfi1_firmware_init */
        get_platform_config(dd);

        /* read in firmware */
        ret = hfi1_firmware_init(dd);
        if (ret)
                goto bail_cleanup;

        /*
         * In general, the PCIe Gen3 transition must occur after the
         * chip has been idled (so it won't initiate any PCIe transactions
         * e.g. an interrupt) and before the driver changes any registers
         * (the transition will reset the registers).
         *
         * In particular, place this call after:
         * - init_chip()     - the chip will not initiate any PCIe transactions
         * - pcie_speeds()   - reads the current link speed
         * - hfi1_firmware_init() - the needed firmware is ready to be
         *                          downloaded
         */
        ret = do_pcie_gen3_transition(dd);
        if (ret)
                goto bail_cleanup;

        /* start setting dd values and adjusting CSRs */
        init_early_variables(dd);

        parse_platform_config(dd);

        ret = obtain_boardname(dd);
        if (ret)
                goto bail_cleanup;

        snprintf(dd->boardversion, BOARD_VERS_MAX,
                 "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
                 HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
                 (u32)dd->majrev,
                 (u32)dd->minrev,
                 (dd->revision >> CCE_REVISION_SW_SHIFT)
                    & CCE_REVISION_SW_MASK);

        /*
         * The real cpu mask is part of the affinity struct but has to be
         * initialized earlier than the rest of the affinity struct because it
         * is needed to calculate the number of user contexts in
         * set_up_context_variables(). However, hfi1_dev_affinity_init(),
         * which initializes the rest of the affinity struct members,
         * depends on set_up_context_variables() for the number of kernel
         * contexts, so it cannot be called before set_up_context_variables().
         */
        ret = init_real_cpu_mask(dd);
        if (ret)
                goto bail_cleanup;

        ret = set_up_context_variables(dd);
        if (ret)
                goto bail_cleanup;

        /* set initial RXE CSRs */
        init_rxe(dd);
        /* set initial TXE CSRs */
        init_txe(dd);
        /* set initial non-RXE, non-TXE CSRs */
        init_other(dd);
        /* set up KDETH QP prefix in both RX and TX CSRs */
        init_kdeth_qp(dd);

        hfi1_dev_affinity_init(dd);

        /* send contexts must be set up before receive contexts */
        ret = init_send_contexts(dd);
        if (ret)
                goto bail_cleanup;

        ret = hfi1_create_ctxts(dd);
        if (ret)
                goto bail_cleanup;

        dd->rcvhdrsize = DEFAULT_RCVHDRSIZE;
        /*
         * rcd[0] is guaranteed to be valid by this point. Also, all
         * contexts are using the same value, as per the module parameter.
         */
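        /*
         * rcvhdrqentsize is in dwords; the 8-byte RHF occupies the last
         * two dwords of each entry, hence the subtraction of
         * sizeof(u64) / sizeof(u32) == 2.
         */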
        dd->rhf_offset = dd->rcd[0]->rcvhdrqentsize - sizeof(u64) / sizeof(u32);

        ret = init_pervl_scs(dd);
        if (ret)
                goto bail_cleanup;

        /* sdma init */
        for (i = 0; i < dd->num_pports; ++i) {
                ret = sdma_init(dd, i);
                if (ret)
                        goto bail_cleanup;
        }

        /* use contexts created by hfi1_create_ctxts */
        ret = set_up_interrupts(dd);
        if (ret)
                goto bail_cleanup;

        /* set up LCB access - must be after set_up_interrupts() */
        init_lcb_access(dd);

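        /* the serial number is the low 24 bits of the base GUID */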
        snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
                 dd->base_guid & 0xFFFFFF);

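        /* the IEEE OUI occupies the top three bytes of the EUI-64 GUID */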
        dd->oui1 = dd->base_guid >> 56 & 0xFF;
        dd->oui2 = dd->base_guid >> 48 & 0xFF;
        dd->oui3 = dd->base_guid >> 40 & 0xFF;

        ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
        if (ret)
                goto bail_clear_intr;
        check_fabric_firmware_versions(dd);

        thermal_init(dd);

        ret = init_cntrs(dd);
        if (ret)
                goto bail_clear_intr;

        ret = init_rcverr(dd);
        if (ret)
                goto bail_free_cntrs;

        ret = eprom_init(dd);
        if (ret)
                goto bail_free_rcverr;

        goto bail;

bail_free_rcverr:
        free_rcverr(dd);
bail_free_cntrs:
        free_cntrs(dd);
bail_clear_intr:
        clean_up_interrupts(dd);
bail_cleanup:
        hfi1_pcie_ddcleanup(dd);
bail_free:
        hfi1_free_devdata(dd);
        dd = ERR_PTR(ret);
bail:
        return dd;
}

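/*
 * Compute the additional delay, in units of egress_cycles(), needed to
 * pace a packet of dw_len dwords down to desired_egress_rate when the
 * port is currently egressing faster.  Returns 0 if no slowdown is
 * needed (or possible).
 */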
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
                        u32 dw_len)
{
        u32 delta_cycles;
        u32 current_egress_rate = ppd->current_egress_rate;
        /* rates here are in units of 10^6 bits/sec */

        if (desired_egress_rate == -1)
                return 0; /* shouldn't happen */

        if (desired_egress_rate >= current_egress_rate)
                return 0; /* we can't help go faster, only slower */

        delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
                        egress_cycles(dw_len * 4, current_egress_rate);

        return (u16)delta_cycles;
}

/**
 * create_pbc - build a pbc for transmission
 * @ppd: port data
 * @flags: special case flags or-ed in built pbc
 * @srate_mbs: static rate, in Mb/s; 0 disables static rate control
 * @vl: vl
 * @dw_len: dword length (header words + data words + pbc words)
 *
 * Create a PBC with the given flags, rate, VL, and length.
 *
 * NOTE: The PBC created will not insert any HCRC - all callers but one are
 * for verbs, which does not use this PSM feature.  The lone other caller
 * is for the diagnostic interface which calls this if the user does not
 * supply their own PBC.
 */
u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
               u32 dw_len)
{
        u64 pbc, delay = 0;

        if (unlikely(srate_mbs))
                delay = delay_cycles(ppd, srate_mbs, dw_len);

        pbc = flags
                | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
                | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
                | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
                | (dw_len & PBC_LENGTH_DWS_MASK)
                        << PBC_LENGTH_DWS_SHIFT;

        return pbc;
}

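/*
 * Example (hypothetical values): sending a packet whose total length
 * (PBC + headers + payload) is 0x40 dwords on VL 0, with no special
 * flags and no static rate throttling:
 *
 *      pbc = create_pbc(ppd, 0, 0, 0, 0x40);
 */
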
#define SBUS_THERMAL    0x4f            /* SBus receiver address of the thermal block */
#define SBUS_THERM_MONITOR_MODE 0x1     /* temperature monitor mode select value */

#define THERM_FAILURE(dev, ret, reason) \
        dd_dev_err((dev),                                               \
                   "Thermal sensor initialization failed: %s (%d)\n",   \
                   (reason), (ret))

/*
 * Initialize the thermal sensor.
 *
 * After initialization, enable polling of thermal sensor through
 * SBus interface. For this to work, the SBus Master firmware has
 * to be loaded, because the HW polling logic uses SBus interrupts,
 * which the default firmware does not support. Otherwise, no data
 * will be returned through the ASIC_STS_THERM CSR.
 */
static int thermal_init(struct hfi1_devdata *dd)
{
        int ret = 0;

        if (dd->icode != ICODE_RTL_SILICON ||
            check_chip_resource(dd, CR_THERM_INIT, NULL))
                return ret;

        ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
        if (ret) {
                THERM_FAILURE(dd, ret, "Acquire SBus");
                return ret;
        }

        dd_dev_info(dd, "Initializing thermal sensor\n");
        /* Disable polling of thermal readings */
        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
        msleep(100);
        /* Thermal Sensor Initialization */
        /*    Step 1: Reset the Thermal SBus Receiver */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
                                RESET_SBUS_RECEIVER, 0);
        if (ret) {
                THERM_FAILURE(dd, ret, "Bus Reset");
                goto done;
        }
        /*    Step 2: Set Reset bit in Thermal block */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
                                WRITE_SBUS_RECEIVER, 0x1);
        if (ret) {
                THERM_FAILURE(dd, ret, "Therm Block Reset");
                goto done;
        }
        /*    Step 3: Write clock divider value 0x32 (= 50: 100 MHz / 50 = 2 MHz) */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
                                WRITE_SBUS_RECEIVER, 0x32);
        if (ret) {
                THERM_FAILURE(dd, ret, "Write Clock Div");
                goto done;
        }
        /*    Step 4: Select temperature mode */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
                                WRITE_SBUS_RECEIVER,
                                SBUS_THERM_MONITOR_MODE);
        if (ret) {
                THERM_FAILURE(dd, ret, "Write Mode Sel");
                goto done;
        }
        /*    Step 5: De-assert block reset and start conversion */
        ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
                                WRITE_SBUS_RECEIVER, 0x2);
        if (ret) {
                THERM_FAILURE(dd, ret, "Write Reset Deassert");
                goto done;
        }
        /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
        msleep(22);

        /* Enable polling of thermal readings */
        write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);

        /* Set initialized flag */
        ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
        if (ret)
                THERM_FAILURE(dd, ret, "Unable to set thermal init flag");

done:
        release_chip_resource(dd, CR_SBUS);
        return ret;
}

static void handle_temp_err(struct hfi1_devdata *dd)
{
        struct hfi1_pportdata *ppd = &dd->pport[0];
        /*
         * Thermal Critical Interrupt
         * Put the device into forced freeze mode, take link down to
         * offline, and put DC into reset.
         */
        dd_dev_emerg(dd,
                     "Critical temperature reached! Forcing device into freeze mode!\n");
        dd->flags |= HFI1_FORCED_FREEZE;
        start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
        /*
         * Shut DC down as much and as quickly as possible.
         *
         * Step 1: Take the link down to OFFLINE. This will cause the
         *         8051 to put the SerDes in reset. However, we don't want
         *         to go through the entire link state machine since we
         *         want to shut down ASAP. Furthermore, this is not a
         *         graceful shutdown, but rather an attempt to save the
         *         chip.
         *         The code below is almost the same as quiet_serdes() but
         *         avoids all the extra work and the sleeps.
         */
        ppd->driver_link_ready = 0;
        ppd->link_enabled = 0;
        set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
                                PLS_OFFLINE);
        /*
         * Step 2: Shutdown LCB and 8051
         *         After shutdown, do not restore DC_CFG_RESET value.
         */
        dc_shutdown(dd);
}