1 /*
2  * Copyright(c) 2015 - 2018 Intel Corporation.
3  *
4  * This file is provided under a dual BSD/GPLv2 license.  When using or
5  * redistributing this file, you may do so under either license.
6  *
7  * GPL LICENSE SUMMARY
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of version 2 of the GNU General Public License as
11  * published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope that it will be useful, but
14  * WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
16  * General Public License for more details.
17  *
18  * BSD LICENSE
19  *
20  * Redistribution and use in source and binary forms, with or without
21  * modification, are permitted provided that the following conditions
22  * are met:
23  *
24  *  - Redistributions of source code must retain the above copyright
25  *    notice, this list of conditions and the following disclaimer.
26  *  - Redistributions in binary form must reproduce the above copyright
27  *    notice, this list of conditions and the following disclaimer in
28  *    the documentation and/or other materials provided with the
29  *    distribution.
30  *  - Neither the name of Intel Corporation nor the names of its
31  *    contributors may be used to endorse or promote products derived
32  *    from this software without specific prior written permission.
33  *
34  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45  *
46  */
47
48 /*
49  * This file contains all of the code that is specific to the HFI chip
50  */
51
52 #include <linux/pci.h>
53 #include <linux/delay.h>
54 #include <linux/interrupt.h>
55 #include <linux/module.h>
56
57 #include "hfi.h"
58 #include "trace.h"
59 #include "mad.h"
60 #include "pio.h"
61 #include "sdma.h"
62 #include "eprom.h"
63 #include "efivar.h"
64 #include "platform.h"
65 #include "aspm.h"
66 #include "affinity.h"
67 #include "debugfs.h"
68 #include "fault.h"
69
70 uint kdeth_qp;
71 module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
72 MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
73
74 uint num_vls = HFI1_MAX_VLS_SUPPORTED;
75 module_param(num_vls, uint, S_IRUGO);
76 MODULE_PARM_DESC(num_vls, "Set number of Virtual Lanes to use (1-8)");
77
78 /*
79  * Default time to aggregate two 10K packets from the idle state
80  * (timer not running). The timer starts at the end of the first packet,
81  * so only the time for one 10K packet and header plus a bit extra is needed.
82  * 10 * 1024 + 64 header bytes = 10304 bytes
83  * 10304 bytes / 12.5 GB/s = 824.32ns
84  */
85 uint rcv_intr_timeout = (824 + 16); /* 16 is for coalescing interrupt */
86 module_param(rcv_intr_timeout, uint, S_IRUGO);
87 MODULE_PARM_DESC(rcv_intr_timeout, "Receive interrupt mitigation timeout in ns");
88
89 uint rcv_intr_count = 16; /* same as qib */
90 module_param(rcv_intr_count, uint, S_IRUGO);
91 MODULE_PARM_DESC(rcv_intr_count, "Receive interrupt mitigation count");
92
93 ushort link_crc_mask = SUPPORTED_CRCS;
94 module_param(link_crc_mask, ushort, S_IRUGO);
95 MODULE_PARM_DESC(link_crc_mask, "CRCs to use on the link");
96
97 uint loopback;
98 module_param_named(loopback, loopback, uint, S_IRUGO);
99 MODULE_PARM_DESC(loopback, "Put into loopback mode (1 = serdes, 3 = external cable)");
100
101 /* Other driver tunables */
102 uint rcv_intr_dynamic = 1; /* enable dynamic mode for rcv int mitigation */
103 static ushort crc_14b_sideband = 1;
104 static uint use_flr = 1;
105 uint quick_linkup; /* skip LNI */
106
107 struct flag_table {
108         u64 flag;       /* the flag */
109         char *str;      /* description string */
110         u16 extra;      /* extra information */
111         u16 unused0;
112         u32 unused1;
113 };
114
115 /* str must be a string constant */
116 #define FLAG_ENTRY(str, extra, flag) {flag, str, extra}
117 #define FLAG_ENTRY0(str, flag) {flag, str, 0}
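/*
 * Illustrative sketch only (not part of the driver): a flag_table is
 * scanned bit-by-bit to turn a raw error CSR value into readable text.
 * The driver's real formatting helper appears later in this file; a
 * minimal decoder over such a table would look roughly like:
 *
 *	for (i = 0; i < num_entries; i++)
 *		if (reg & table[i].flag)
 *			dd_dev_info(dd, "%s\n", table[i].str);
 */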
118
119 /* Send Error Consequences */
120 #define SEC_WRITE_DROPPED       0x1
121 #define SEC_PACKET_DROPPED      0x2
122 #define SEC_SC_HALTED           0x4     /* per-context only */
123 #define SEC_SPC_FREEZE          0x8     /* per-HFI only */
124
125 #define DEFAULT_KRCVQS            2
126 #define MIN_KERNEL_KCTXTS         2
127 #define FIRST_KERNEL_KCTXT        1
128
129 /*
130  * RSM instance allocation
131  *   0 - Verbs
132  *   1 - User Fecn Handling
133  *   2 - Vnic
134  */
135 #define RSM_INS_VERBS             0
136 #define RSM_INS_FECN              1
137 #define RSM_INS_VNIC              2
138
139 /* Bit offset into the GUID which carries HFI id information */
140 #define GUID_HFI_INDEX_SHIFT     39
141
142 /* extract the emulation revision */
143 #define emulator_rev(dd) ((dd)->irev >> 8)
144 /* parallel and serial emulation versions are 3 and 4 respectively */
145 #define is_emulator_p(dd) ((((dd)->irev) & 0xf) == 3)
146 #define is_emulator_s(dd) ((((dd)->irev) & 0xf) == 4)
147
148 /* RSM fields for Verbs */
149 /* packet type */
150 #define IB_PACKET_TYPE         2ull
151 #define QW_SHIFT               6ull
152 /* QPN[7..1] */
153 #define QPN_WIDTH              7ull
154
155 /* LRH.BTH: QW 0, OFFSET 48 - for match */
156 #define LRH_BTH_QW             0ull
157 #define LRH_BTH_BIT_OFFSET     48ull
158 #define LRH_BTH_OFFSET(off)    ((LRH_BTH_QW << QW_SHIFT) | (off))
159 #define LRH_BTH_MATCH_OFFSET   LRH_BTH_OFFSET(LRH_BTH_BIT_OFFSET)
160 #define LRH_BTH_SELECT
161 #define LRH_BTH_MASK           3ull
162 #define LRH_BTH_VALUE          2ull
163
164 /* LRH.SC[3..0] QW 0, OFFSET 56 - for match */
165 #define LRH_SC_QW              0ull
166 #define LRH_SC_BIT_OFFSET      56ull
167 #define LRH_SC_OFFSET(off)     ((LRH_SC_QW << QW_SHIFT) | (off))
168 #define LRH_SC_MATCH_OFFSET    LRH_SC_OFFSET(LRH_SC_BIT_OFFSET)
169 #define LRH_SC_MASK            128ull
170 #define LRH_SC_VALUE           0ull
171
172 /* SC[n..0] QW 0, OFFSET 60 - for select */
173 #define LRH_SC_SELECT_OFFSET  ((LRH_SC_QW << QW_SHIFT) | (60ull))
174
175 /* QPN[m+n:1] QW 1, OFFSET 1 */
176 #define QPN_SELECT_OFFSET      ((1ull << QW_SHIFT) | (1ull))
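/*
 * Note on the offset encoding above: each match/select offset packs a
 * quad-word index and a bit offset into one value, (QW << QW_SHIFT) | bit.
 * For example, LRH_BTH_MATCH_OFFSET is (0 << 6) | 48 = 48 (bit 48 of QW 0)
 * and QPN_SELECT_OFFSET is (1 << 6) | 1 = 65 (bit 1 of QW 1).
 */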
177
178 /* RSM fields for Vnic */
179 /* L2_TYPE: QW 0, OFFSET 61 - for match */
180 #define L2_TYPE_QW             0ull
181 #define L2_TYPE_BIT_OFFSET     61ull
182 #define L2_TYPE_OFFSET(off)    ((L2_TYPE_QW << QW_SHIFT) | (off))
183 #define L2_TYPE_MATCH_OFFSET   L2_TYPE_OFFSET(L2_TYPE_BIT_OFFSET)
184 #define L2_TYPE_MASK           3ull
185 #define L2_16B_VALUE           2ull
186
187 /* L4_TYPE QW 1, OFFSET 0 - for match */
188 #define L4_TYPE_QW              1ull
189 #define L4_TYPE_BIT_OFFSET      0ull
190 #define L4_TYPE_OFFSET(off)     ((L4_TYPE_QW << QW_SHIFT) | (off))
191 #define L4_TYPE_MATCH_OFFSET    L4_TYPE_OFFSET(L4_TYPE_BIT_OFFSET)
192 #define L4_16B_TYPE_MASK        0xFFull
193 #define L4_16B_ETH_VALUE        0x78ull
194
195 /* 16B VESWID - for select */
196 #define L4_16B_HDR_VESWID_OFFSET  ((2 << QW_SHIFT) | (16ull))
197 /* 16B ENTROPY - for select */
198 #define L2_16B_ENTROPY_OFFSET     ((1 << QW_SHIFT) | (32ull))
199
200 /* defines to build power on SC2VL table */
201 #define SC2VL_VAL( \
202         num, \
203         sc0, sc0val, \
204         sc1, sc1val, \
205         sc2, sc2val, \
206         sc3, sc3val, \
207         sc4, sc4val, \
208         sc5, sc5val, \
209         sc6, sc6val, \
210         sc7, sc7val) \
211 ( \
212         ((u64)(sc0val) << SEND_SC2VLT##num##_SC##sc0##_SHIFT) | \
213         ((u64)(sc1val) << SEND_SC2VLT##num##_SC##sc1##_SHIFT) | \
214         ((u64)(sc2val) << SEND_SC2VLT##num##_SC##sc2##_SHIFT) | \
215         ((u64)(sc3val) << SEND_SC2VLT##num##_SC##sc3##_SHIFT) | \
216         ((u64)(sc4val) << SEND_SC2VLT##num##_SC##sc4##_SHIFT) | \
217         ((u64)(sc5val) << SEND_SC2VLT##num##_SC##sc5##_SHIFT) | \
218         ((u64)(sc6val) << SEND_SC2VLT##num##_SC##sc6##_SHIFT) | \
219         ((u64)(sc7val) << SEND_SC2VLT##num##_SC##sc7##_SHIFT)   \
220 )
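/*
 * Usage sketch with hypothetical VL assignments, shown only to illustrate
 * the macro; the register name is assumed to follow the SEND_SC2VLT<num>
 * pattern implied by the shift macros above.  Mapping SC0-SC3 to VL0 and
 * SC4-SC7 to VL1 in the first table would look roughly like:
 *
 *	write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(0,
 *			0, 0, 1, 0, 2, 0, 3, 0,
 *			4, 1, 5, 1, 6, 1, 7, 1));
 *
 * Each (sc, val) pair is shifted into that SC's field of the 64-bit value.
 */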
221
222 #define DC_SC_VL_VAL( \
223         range, \
224         e0, e0val, \
225         e1, e1val, \
226         e2, e2val, \
227         e3, e3val, \
228         e4, e4val, \
229         e5, e5val, \
230         e6, e6val, \
231         e7, e7val, \
232         e8, e8val, \
233         e9, e9val, \
234         e10, e10val, \
235         e11, e11val, \
236         e12, e12val, \
237         e13, e13val, \
238         e14, e14val, \
239         e15, e15val) \
240 ( \
241         ((u64)(e0val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e0##_SHIFT) | \
242         ((u64)(e1val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e1##_SHIFT) | \
243         ((u64)(e2val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e2##_SHIFT) | \
244         ((u64)(e3val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e3##_SHIFT) | \
245         ((u64)(e4val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e4##_SHIFT) | \
246         ((u64)(e5val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e5##_SHIFT) | \
247         ((u64)(e6val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e6##_SHIFT) | \
248         ((u64)(e7val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e7##_SHIFT) | \
249         ((u64)(e8val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e8##_SHIFT) | \
250         ((u64)(e9val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e9##_SHIFT) | \
251         ((u64)(e10val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e10##_SHIFT) | \
252         ((u64)(e11val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e11##_SHIFT) | \
253         ((u64)(e12val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e12##_SHIFT) | \
254         ((u64)(e13val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e13##_SHIFT) | \
255         ((u64)(e14val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e14##_SHIFT) | \
256         ((u64)(e15val) << DCC_CFG_SC_VL_TABLE_##range##_ENTRY##e15##_SHIFT) \
257 )
258
259 /* all CceStatus sub-block freeze bits */
260 #define ALL_FROZE (CCE_STATUS_SDMA_FROZE_SMASK \
261                         | CCE_STATUS_RXE_FROZE_SMASK \
262                         | CCE_STATUS_TXE_FROZE_SMASK \
263                         | CCE_STATUS_TXE_PIO_FROZE_SMASK)
264 /* all CceStatus sub-block TXE pause bits */
265 #define ALL_TXE_PAUSE (CCE_STATUS_TXE_PIO_PAUSED_SMASK \
266                         | CCE_STATUS_TXE_PAUSED_SMASK \
267                         | CCE_STATUS_SDMA_PAUSED_SMASK)
268 /* all CceStatus sub-block RXE pause bits */
269 #define ALL_RXE_PAUSE CCE_STATUS_RXE_PAUSED_SMASK
270
271 #define CNTR_MAX 0xFFFFFFFFFFFFFFFFULL
272 #define CNTR_32BIT_MAX 0x00000000FFFFFFFF
273
274 /*
275  * CCE Error flags.
276  */
277 static struct flag_table cce_err_status_flags[] = {
278 /* 0*/  FLAG_ENTRY0("CceCsrParityErr",
279                 CCE_ERR_STATUS_CCE_CSR_PARITY_ERR_SMASK),
280 /* 1*/  FLAG_ENTRY0("CceCsrReadBadAddrErr",
281                 CCE_ERR_STATUS_CCE_CSR_READ_BAD_ADDR_ERR_SMASK),
282 /* 2*/  FLAG_ENTRY0("CceCsrWriteBadAddrErr",
283                 CCE_ERR_STATUS_CCE_CSR_WRITE_BAD_ADDR_ERR_SMASK),
284 /* 3*/  FLAG_ENTRY0("CceTrgtAsyncFifoParityErr",
285                 CCE_ERR_STATUS_CCE_TRGT_ASYNC_FIFO_PARITY_ERR_SMASK),
286 /* 4*/  FLAG_ENTRY0("CceTrgtAccessErr",
287                 CCE_ERR_STATUS_CCE_TRGT_ACCESS_ERR_SMASK),
288 /* 5*/  FLAG_ENTRY0("CceRspdDataParityErr",
289                 CCE_ERR_STATUS_CCE_RSPD_DATA_PARITY_ERR_SMASK),
290 /* 6*/  FLAG_ENTRY0("CceCli0AsyncFifoParityErr",
291                 CCE_ERR_STATUS_CCE_CLI0_ASYNC_FIFO_PARITY_ERR_SMASK),
292 /* 7*/  FLAG_ENTRY0("CceCsrCfgBusParityErr",
293                 CCE_ERR_STATUS_CCE_CSR_CFG_BUS_PARITY_ERR_SMASK),
294 /* 8*/  FLAG_ENTRY0("CceCli2AsyncFifoParityErr",
295                 CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK),
296 /* 9*/  FLAG_ENTRY0("CceCli1AsyncFifoPioCrdtParityErr",
297             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR_SMASK),
298 /*10*/  FLAG_ENTRY0("CceCli1AsyncFifoSdmaHdParityErr",
299             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR_SMASK),
300 /*11*/  FLAG_ENTRY0("CceCli1AsyncFifoRxdmaParityError",
301             CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERROR_SMASK),
302 /*12*/  FLAG_ENTRY0("CceCli1AsyncFifoDbgParityError",
303                 CCE_ERR_STATUS_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERROR_SMASK),
304 /*13*/  FLAG_ENTRY0("PcicRetryMemCorErr",
305                 CCE_ERR_STATUS_PCIC_RETRY_MEM_COR_ERR_SMASK),
306 /*14*/  FLAG_ENTRY0("PcicRetrySotMemCorErr",
307                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_COR_ERR_SMASK),
308 /*15*/  FLAG_ENTRY0("PcicPostHdQCorErr",
309                 CCE_ERR_STATUS_PCIC_POST_HD_QCOR_ERR_SMASK),
310 /*16*/  FLAG_ENTRY0("PcicPostDatQCorErr",
311                 CCE_ERR_STATUS_PCIC_POST_DAT_QCOR_ERR_SMASK),
312 /*17*/  FLAG_ENTRY0("PcicCplHdQCorErr",
313                 CCE_ERR_STATUS_PCIC_CPL_HD_QCOR_ERR_SMASK),
314 /*18*/  FLAG_ENTRY0("PcicCplDatQCorErr",
315                 CCE_ERR_STATUS_PCIC_CPL_DAT_QCOR_ERR_SMASK),
316 /*19*/  FLAG_ENTRY0("PcicNPostHQParityErr",
317                 CCE_ERR_STATUS_PCIC_NPOST_HQ_PARITY_ERR_SMASK),
318 /*20*/  FLAG_ENTRY0("PcicNPostDatQParityErr",
319                 CCE_ERR_STATUS_PCIC_NPOST_DAT_QPARITY_ERR_SMASK),
320 /*21*/  FLAG_ENTRY0("PcicRetryMemUncErr",
321                 CCE_ERR_STATUS_PCIC_RETRY_MEM_UNC_ERR_SMASK),
322 /*22*/  FLAG_ENTRY0("PcicRetrySotMemUncErr",
323                 CCE_ERR_STATUS_PCIC_RETRY_SOT_MEM_UNC_ERR_SMASK),
324 /*23*/  FLAG_ENTRY0("PcicPostHdQUncErr",
325                 CCE_ERR_STATUS_PCIC_POST_HD_QUNC_ERR_SMASK),
326 /*24*/  FLAG_ENTRY0("PcicPostDatQUncErr",
327                 CCE_ERR_STATUS_PCIC_POST_DAT_QUNC_ERR_SMASK),
328 /*25*/  FLAG_ENTRY0("PcicCplHdQUncErr",
329                 CCE_ERR_STATUS_PCIC_CPL_HD_QUNC_ERR_SMASK),
330 /*26*/  FLAG_ENTRY0("PcicCplDatQUncErr",
331                 CCE_ERR_STATUS_PCIC_CPL_DAT_QUNC_ERR_SMASK),
332 /*27*/  FLAG_ENTRY0("PcicTransmitFrontParityErr",
333                 CCE_ERR_STATUS_PCIC_TRANSMIT_FRONT_PARITY_ERR_SMASK),
334 /*28*/  FLAG_ENTRY0("PcicTransmitBackParityErr",
335                 CCE_ERR_STATUS_PCIC_TRANSMIT_BACK_PARITY_ERR_SMASK),
336 /*29*/  FLAG_ENTRY0("PcicReceiveParityErr",
337                 CCE_ERR_STATUS_PCIC_RECEIVE_PARITY_ERR_SMASK),
338 /*30*/  FLAG_ENTRY0("CceTrgtCplTimeoutErr",
339                 CCE_ERR_STATUS_CCE_TRGT_CPL_TIMEOUT_ERR_SMASK),
340 /*31*/  FLAG_ENTRY0("LATriggered",
341                 CCE_ERR_STATUS_LA_TRIGGERED_SMASK),
342 /*32*/  FLAG_ENTRY0("CceSegReadBadAddrErr",
343                 CCE_ERR_STATUS_CCE_SEG_READ_BAD_ADDR_ERR_SMASK),
344 /*33*/  FLAG_ENTRY0("CceSegWriteBadAddrErr",
345                 CCE_ERR_STATUS_CCE_SEG_WRITE_BAD_ADDR_ERR_SMASK),
346 /*34*/  FLAG_ENTRY0("CceRcplAsyncFifoParityErr",
347                 CCE_ERR_STATUS_CCE_RCPL_ASYNC_FIFO_PARITY_ERR_SMASK),
348 /*35*/  FLAG_ENTRY0("CceRxdmaConvFifoParityErr",
349                 CCE_ERR_STATUS_CCE_RXDMA_CONV_FIFO_PARITY_ERR_SMASK),
350 /*36*/  FLAG_ENTRY0("CceMsixTableCorErr",
351                 CCE_ERR_STATUS_CCE_MSIX_TABLE_COR_ERR_SMASK),
352 /*37*/  FLAG_ENTRY0("CceMsixTableUncErr",
353                 CCE_ERR_STATUS_CCE_MSIX_TABLE_UNC_ERR_SMASK),
354 /*38*/  FLAG_ENTRY0("CceIntMapCorErr",
355                 CCE_ERR_STATUS_CCE_INT_MAP_COR_ERR_SMASK),
356 /*39*/  FLAG_ENTRY0("CceIntMapUncErr",
357                 CCE_ERR_STATUS_CCE_INT_MAP_UNC_ERR_SMASK),
358 /*40*/  FLAG_ENTRY0("CceMsixCsrParityErr",
359                 CCE_ERR_STATUS_CCE_MSIX_CSR_PARITY_ERR_SMASK),
360 /*41-63 reserved*/
361 };
362
363 /*
364  * Misc Error flags
365  */
366 #define MES(text) MISC_ERR_STATUS_MISC_##text##_ERR_SMASK
367 static struct flag_table misc_err_status_flags[] = {
368 /* 0*/  FLAG_ENTRY0("CSR_PARITY", MES(CSR_PARITY)),
369 /* 1*/  FLAG_ENTRY0("CSR_READ_BAD_ADDR", MES(CSR_READ_BAD_ADDR)),
370 /* 2*/  FLAG_ENTRY0("CSR_WRITE_BAD_ADDR", MES(CSR_WRITE_BAD_ADDR)),
371 /* 3*/  FLAG_ENTRY0("SBUS_WRITE_FAILED", MES(SBUS_WRITE_FAILED)),
372 /* 4*/  FLAG_ENTRY0("KEY_MISMATCH", MES(KEY_MISMATCH)),
373 /* 5*/  FLAG_ENTRY0("FW_AUTH_FAILED", MES(FW_AUTH_FAILED)),
374 /* 6*/  FLAG_ENTRY0("EFUSE_CSR_PARITY", MES(EFUSE_CSR_PARITY)),
375 /* 7*/  FLAG_ENTRY0("EFUSE_READ_BAD_ADDR", MES(EFUSE_READ_BAD_ADDR)),
376 /* 8*/  FLAG_ENTRY0("EFUSE_WRITE", MES(EFUSE_WRITE)),
377 /* 9*/  FLAG_ENTRY0("EFUSE_DONE_PARITY", MES(EFUSE_DONE_PARITY)),
378 /*10*/  FLAG_ENTRY0("INVALID_EEP_CMD", MES(INVALID_EEP_CMD)),
379 /*11*/  FLAG_ENTRY0("MBIST_FAIL", MES(MBIST_FAIL)),
380 /*12*/  FLAG_ENTRY0("PLL_LOCK_FAIL", MES(PLL_LOCK_FAIL))
381 };
382
383 /*
384  * TXE PIO Error flags and consequences
385  */
386 static struct flag_table pio_err_status_flags[] = {
387 /* 0*/  FLAG_ENTRY("PioWriteBadCtxt",
388         SEC_WRITE_DROPPED,
389         SEND_PIO_ERR_STATUS_PIO_WRITE_BAD_CTXT_ERR_SMASK),
390 /* 1*/  FLAG_ENTRY("PioWriteAddrParity",
391         SEC_SPC_FREEZE,
392         SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK),
393 /* 2*/  FLAG_ENTRY("PioCsrParity",
394         SEC_SPC_FREEZE,
395         SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK),
396 /* 3*/  FLAG_ENTRY("PioSbMemFifo0",
397         SEC_SPC_FREEZE,
398         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK),
399 /* 4*/  FLAG_ENTRY("PioSbMemFifo1",
400         SEC_SPC_FREEZE,
401         SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK),
402 /* 5*/  FLAG_ENTRY("PioPccFifoParity",
403         SEC_SPC_FREEZE,
404         SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK),
405 /* 6*/  FLAG_ENTRY("PioPecFifoParity",
406         SEC_SPC_FREEZE,
407         SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK),
408 /* 7*/  FLAG_ENTRY("PioSbrdctlCrrelParity",
409         SEC_SPC_FREEZE,
410         SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK),
411 /* 8*/  FLAG_ENTRY("PioSbrdctrlCrrelFifoParity",
412         SEC_SPC_FREEZE,
413         SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK),
414 /* 9*/  FLAG_ENTRY("PioPktEvictFifoParityErr",
415         SEC_SPC_FREEZE,
416         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK),
417 /*10*/  FLAG_ENTRY("PioSmPktResetParity",
418         SEC_SPC_FREEZE,
419         SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK),
420 /*11*/  FLAG_ENTRY("PioVlLenMemBank0Unc",
421         SEC_SPC_FREEZE,
422         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK),
423 /*12*/  FLAG_ENTRY("PioVlLenMemBank1Unc",
424         SEC_SPC_FREEZE,
425         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK),
426 /*13*/  FLAG_ENTRY("PioVlLenMemBank0Cor",
427         0,
428         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_COR_ERR_SMASK),
429 /*14*/  FLAG_ENTRY("PioVlLenMemBank1Cor",
430         0,
431         SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_COR_ERR_SMASK),
432 /*15*/  FLAG_ENTRY("PioCreditRetFifoParity",
433         SEC_SPC_FREEZE,
434         SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK),
435 /*16*/  FLAG_ENTRY("PioPpmcPblFifo",
436         SEC_SPC_FREEZE,
437         SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK),
438 /*17*/  FLAG_ENTRY("PioInitSmIn",
439         0,
440         SEND_PIO_ERR_STATUS_PIO_INIT_SM_IN_ERR_SMASK),
441 /*18*/  FLAG_ENTRY("PioPktEvictSmOrArbSm",
442         SEC_SPC_FREEZE,
443         SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK),
444 /*19*/  FLAG_ENTRY("PioHostAddrMemUnc",
445         SEC_SPC_FREEZE,
446         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK),
447 /*20*/  FLAG_ENTRY("PioHostAddrMemCor",
448         0,
449         SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_COR_ERR_SMASK),
450 /*21*/  FLAG_ENTRY("PioWriteDataParity",
451         SEC_SPC_FREEZE,
452         SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK),
453 /*22*/  FLAG_ENTRY("PioStateMachine",
454         SEC_SPC_FREEZE,
455         SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK),
456 /*23*/  FLAG_ENTRY("PioWriteQwValidParity",
457         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
458         SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK),
459 /*24*/  FLAG_ENTRY("PioBlockQwCountParity",
460         SEC_WRITE_DROPPED | SEC_SPC_FREEZE,
461         SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK),
462 /*25*/  FLAG_ENTRY("PioVlfVlLenParity",
463         SEC_SPC_FREEZE,
464         SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK),
465 /*26*/  FLAG_ENTRY("PioVlfSopParity",
466         SEC_SPC_FREEZE,
467         SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK),
468 /*27*/  FLAG_ENTRY("PioVlFifoParity",
469         SEC_SPC_FREEZE,
470         SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK),
471 /*28*/  FLAG_ENTRY("PioPpmcBqcMemParity",
472         SEC_SPC_FREEZE,
473         SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK),
474 /*29*/  FLAG_ENTRY("PioPpmcSopLen",
475         SEC_SPC_FREEZE,
476         SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK),
477 /*30-31 reserved*/
478 /*32*/  FLAG_ENTRY("PioCurrentFreeCntParity",
479         SEC_SPC_FREEZE,
480         SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK),
481 /*33*/  FLAG_ENTRY("PioLastReturnedCntParity",
482         SEC_SPC_FREEZE,
483         SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK),
484 /*34*/  FLAG_ENTRY("PioPccSopHeadParity",
485         SEC_SPC_FREEZE,
486         SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK),
487 /*35*/  FLAG_ENTRY("PioPecSopHeadParityErr",
488         SEC_SPC_FREEZE,
489         SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK),
490 /*36-63 reserved*/
491 };
492
493 /* TXE PIO errors that cause an SPC freeze */
494 #define ALL_PIO_FREEZE_ERR \
495         (SEND_PIO_ERR_STATUS_PIO_WRITE_ADDR_PARITY_ERR_SMASK \
496         | SEND_PIO_ERR_STATUS_PIO_CSR_PARITY_ERR_SMASK \
497         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO0_ERR_SMASK \
498         | SEND_PIO_ERR_STATUS_PIO_SB_MEM_FIFO1_ERR_SMASK \
499         | SEND_PIO_ERR_STATUS_PIO_PCC_FIFO_PARITY_ERR_SMASK \
500         | SEND_PIO_ERR_STATUS_PIO_PEC_FIFO_PARITY_ERR_SMASK \
501         | SEND_PIO_ERR_STATUS_PIO_SBRDCTL_CRREL_PARITY_ERR_SMASK \
502         | SEND_PIO_ERR_STATUS_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR_SMASK \
503         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_FIFO_PARITY_ERR_SMASK \
504         | SEND_PIO_ERR_STATUS_PIO_SM_PKT_RESET_PARITY_ERR_SMASK \
505         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK0_UNC_ERR_SMASK \
506         | SEND_PIO_ERR_STATUS_PIO_VL_LEN_MEM_BANK1_UNC_ERR_SMASK \
507         | SEND_PIO_ERR_STATUS_PIO_CREDIT_RET_FIFO_PARITY_ERR_SMASK \
508         | SEND_PIO_ERR_STATUS_PIO_PPMC_PBL_FIFO_ERR_SMASK \
509         | SEND_PIO_ERR_STATUS_PIO_PKT_EVICT_SM_OR_ARB_SM_ERR_SMASK \
510         | SEND_PIO_ERR_STATUS_PIO_HOST_ADDR_MEM_UNC_ERR_SMASK \
511         | SEND_PIO_ERR_STATUS_PIO_WRITE_DATA_PARITY_ERR_SMASK \
512         | SEND_PIO_ERR_STATUS_PIO_STATE_MACHINE_ERR_SMASK \
513         | SEND_PIO_ERR_STATUS_PIO_WRITE_QW_VALID_PARITY_ERR_SMASK \
514         | SEND_PIO_ERR_STATUS_PIO_BLOCK_QW_COUNT_PARITY_ERR_SMASK \
515         | SEND_PIO_ERR_STATUS_PIO_VLF_VL_LEN_PARITY_ERR_SMASK \
516         | SEND_PIO_ERR_STATUS_PIO_VLF_SOP_PARITY_ERR_SMASK \
517         | SEND_PIO_ERR_STATUS_PIO_VL_FIFO_PARITY_ERR_SMASK \
518         | SEND_PIO_ERR_STATUS_PIO_PPMC_BQC_MEM_PARITY_ERR_SMASK \
519         | SEND_PIO_ERR_STATUS_PIO_PPMC_SOP_LEN_ERR_SMASK \
520         | SEND_PIO_ERR_STATUS_PIO_CURRENT_FREE_CNT_PARITY_ERR_SMASK \
521         | SEND_PIO_ERR_STATUS_PIO_LAST_RETURNED_CNT_PARITY_ERR_SMASK \
522         | SEND_PIO_ERR_STATUS_PIO_PCC_SOP_HEAD_PARITY_ERR_SMASK \
523         | SEND_PIO_ERR_STATUS_PIO_PEC_SOP_HEAD_PARITY_ERR_SMASK)
524
525 /*
526  * TXE SDMA Error flags
527  */
528 static struct flag_table sdma_err_status_flags[] = {
529 /* 0*/  FLAG_ENTRY0("SDmaRpyTagErr",
530                 SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK),
531 /* 1*/  FLAG_ENTRY0("SDmaCsrParityErr",
532                 SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK),
533 /* 2*/  FLAG_ENTRY0("SDmaPcieReqTrackingUncErr",
534                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK),
535 /* 3*/  FLAG_ENTRY0("SDmaPcieReqTrackingCorErr",
536                 SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_COR_ERR_SMASK),
537 /*04-63 reserved*/
538 };
539
540 /* TXE SDMA errors that cause an SPC freeze */
541 #define ALL_SDMA_FREEZE_ERR  \
542                 (SEND_DMA_ERR_STATUS_SDMA_RPY_TAG_ERR_SMASK \
543                 | SEND_DMA_ERR_STATUS_SDMA_CSR_PARITY_ERR_SMASK \
544                 | SEND_DMA_ERR_STATUS_SDMA_PCIE_REQ_TRACKING_UNC_ERR_SMASK)
545
546 /* SendEgressErrInfo bits that correspond to a PortXmitDiscard counter */
547 #define PORT_DISCARD_EGRESS_ERRS \
548         (SEND_EGRESS_ERR_INFO_TOO_LONG_IB_PACKET_ERR_SMASK \
549         | SEND_EGRESS_ERR_INFO_VL_MAPPING_ERR_SMASK \
550         | SEND_EGRESS_ERR_INFO_VL_ERR_SMASK)
551
552 /*
553  * TXE Egress Error flags
554  */
555 #define SEES(text) SEND_EGRESS_ERR_STATUS_##text##_ERR_SMASK
556 static struct flag_table egress_err_status_flags[] = {
557 /* 0*/  FLAG_ENTRY0("TxPktIntegrityMemCorErr", SEES(TX_PKT_INTEGRITY_MEM_COR)),
558 /* 1*/  FLAG_ENTRY0("TxPktIntegrityMemUncErr", SEES(TX_PKT_INTEGRITY_MEM_UNC)),
559 /* 2 reserved */
560 /* 3*/  FLAG_ENTRY0("TxEgressFifoUnderrunOrParityErr",
561                 SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY)),
562 /* 4*/  FLAG_ENTRY0("TxLinkdownErr", SEES(TX_LINKDOWN)),
563 /* 5*/  FLAG_ENTRY0("TxIncorrectLinkStateErr", SEES(TX_INCORRECT_LINK_STATE)),
564 /* 6 reserved */
565 /* 7*/  FLAG_ENTRY0("TxPioLaunchIntfParityErr",
566                 SEES(TX_PIO_LAUNCH_INTF_PARITY)),
567 /* 8*/  FLAG_ENTRY0("TxSdmaLaunchIntfParityErr",
568                 SEES(TX_SDMA_LAUNCH_INTF_PARITY)),
569 /* 9-10 reserved */
570 /*11*/  FLAG_ENTRY0("TxSbrdCtlStateMachineParityErr",
571                 SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY)),
572 /*12*/  FLAG_ENTRY0("TxIllegalVLErr", SEES(TX_ILLEGAL_VL)),
573 /*13*/  FLAG_ENTRY0("TxLaunchCsrParityErr", SEES(TX_LAUNCH_CSR_PARITY)),
574 /*14*/  FLAG_ENTRY0("TxSbrdCtlCsrParityErr", SEES(TX_SBRD_CTL_CSR_PARITY)),
575 /*15*/  FLAG_ENTRY0("TxConfigParityErr", SEES(TX_CONFIG_PARITY)),
576 /*16*/  FLAG_ENTRY0("TxSdma0DisallowedPacketErr",
577                 SEES(TX_SDMA0_DISALLOWED_PACKET)),
578 /*17*/  FLAG_ENTRY0("TxSdma1DisallowedPacketErr",
579                 SEES(TX_SDMA1_DISALLOWED_PACKET)),
580 /*18*/  FLAG_ENTRY0("TxSdma2DisallowedPacketErr",
581                 SEES(TX_SDMA2_DISALLOWED_PACKET)),
582 /*19*/  FLAG_ENTRY0("TxSdma3DisallowedPacketErr",
583                 SEES(TX_SDMA3_DISALLOWED_PACKET)),
584 /*20*/  FLAG_ENTRY0("TxSdma4DisallowedPacketErr",
585                 SEES(TX_SDMA4_DISALLOWED_PACKET)),
586 /*21*/  FLAG_ENTRY0("TxSdma5DisallowedPacketErr",
587                 SEES(TX_SDMA5_DISALLOWED_PACKET)),
588 /*22*/  FLAG_ENTRY0("TxSdma6DisallowedPacketErr",
589                 SEES(TX_SDMA6_DISALLOWED_PACKET)),
590 /*23*/  FLAG_ENTRY0("TxSdma7DisallowedPacketErr",
591                 SEES(TX_SDMA7_DISALLOWED_PACKET)),
592 /*24*/  FLAG_ENTRY0("TxSdma8DisallowedPacketErr",
593                 SEES(TX_SDMA8_DISALLOWED_PACKET)),
594 /*25*/  FLAG_ENTRY0("TxSdma9DisallowedPacketErr",
595                 SEES(TX_SDMA9_DISALLOWED_PACKET)),
596 /*26*/  FLAG_ENTRY0("TxSdma10DisallowedPacketErr",
597                 SEES(TX_SDMA10_DISALLOWED_PACKET)),
598 /*27*/  FLAG_ENTRY0("TxSdma11DisallowedPacketErr",
599                 SEES(TX_SDMA11_DISALLOWED_PACKET)),
600 /*28*/  FLAG_ENTRY0("TxSdma12DisallowedPacketErr",
601                 SEES(TX_SDMA12_DISALLOWED_PACKET)),
602 /*29*/  FLAG_ENTRY0("TxSdma13DisallowedPacketErr",
603                 SEES(TX_SDMA13_DISALLOWED_PACKET)),
604 /*30*/  FLAG_ENTRY0("TxSdma14DisallowedPacketErr",
605                 SEES(TX_SDMA14_DISALLOWED_PACKET)),
606 /*31*/  FLAG_ENTRY0("TxSdma15DisallowedPacketErr",
607                 SEES(TX_SDMA15_DISALLOWED_PACKET)),
608 /*32*/  FLAG_ENTRY0("TxLaunchFifo0UncOrParityErr",
609                 SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY)),
610 /*33*/  FLAG_ENTRY0("TxLaunchFifo1UncOrParityErr",
611                 SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY)),
612 /*34*/  FLAG_ENTRY0("TxLaunchFifo2UncOrParityErr",
613                 SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY)),
614 /*35*/  FLAG_ENTRY0("TxLaunchFifo3UncOrParityErr",
615                 SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY)),
616 /*36*/  FLAG_ENTRY0("TxLaunchFifo4UncOrParityErr",
617                 SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY)),
618 /*37*/  FLAG_ENTRY0("TxLaunchFifo5UncOrParityErr",
619                 SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY)),
620 /*38*/  FLAG_ENTRY0("TxLaunchFifo6UncOrParityErr",
621                 SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY)),
622 /*39*/  FLAG_ENTRY0("TxLaunchFifo7UncOrParityErr",
623                 SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY)),
624 /*40*/  FLAG_ENTRY0("TxLaunchFifo8UncOrParityErr",
625                 SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY)),
626 /*41*/  FLAG_ENTRY0("TxCreditReturnParityErr", SEES(TX_CREDIT_RETURN_PARITY)),
627 /*42*/  FLAG_ENTRY0("TxSbHdrUncErr", SEES(TX_SB_HDR_UNC)),
628 /*43*/  FLAG_ENTRY0("TxReadSdmaMemoryUncErr", SEES(TX_READ_SDMA_MEMORY_UNC)),
629 /*44*/  FLAG_ENTRY0("TxReadPioMemoryUncErr", SEES(TX_READ_PIO_MEMORY_UNC)),
630 /*45*/  FLAG_ENTRY0("TxEgressFifoUncErr", SEES(TX_EGRESS_FIFO_UNC)),
631 /*46*/  FLAG_ENTRY0("TxHcrcInsertionErr", SEES(TX_HCRC_INSERTION)),
632 /*47*/  FLAG_ENTRY0("TxCreditReturnVLErr", SEES(TX_CREDIT_RETURN_VL)),
633 /*48*/  FLAG_ENTRY0("TxLaunchFifo0CorErr", SEES(TX_LAUNCH_FIFO0_COR)),
634 /*49*/  FLAG_ENTRY0("TxLaunchFifo1CorErr", SEES(TX_LAUNCH_FIFO1_COR)),
635 /*50*/  FLAG_ENTRY0("TxLaunchFifo2CorErr", SEES(TX_LAUNCH_FIFO2_COR)),
636 /*51*/  FLAG_ENTRY0("TxLaunchFifo3CorErr", SEES(TX_LAUNCH_FIFO3_COR)),
637 /*52*/  FLAG_ENTRY0("TxLaunchFifo4CorErr", SEES(TX_LAUNCH_FIFO4_COR)),
638 /*53*/  FLAG_ENTRY0("TxLaunchFifo5CorErr", SEES(TX_LAUNCH_FIFO5_COR)),
639 /*54*/  FLAG_ENTRY0("TxLaunchFifo6CorErr", SEES(TX_LAUNCH_FIFO6_COR)),
640 /*55*/  FLAG_ENTRY0("TxLaunchFifo7CorErr", SEES(TX_LAUNCH_FIFO7_COR)),
641 /*56*/  FLAG_ENTRY0("TxLaunchFifo8CorErr", SEES(TX_LAUNCH_FIFO8_COR)),
642 /*57*/  FLAG_ENTRY0("TxCreditOverrunErr", SEES(TX_CREDIT_OVERRUN)),
643 /*58*/  FLAG_ENTRY0("TxSbHdrCorErr", SEES(TX_SB_HDR_COR)),
644 /*59*/  FLAG_ENTRY0("TxReadSdmaMemoryCorErr", SEES(TX_READ_SDMA_MEMORY_COR)),
645 /*60*/  FLAG_ENTRY0("TxReadPioMemoryCorErr", SEES(TX_READ_PIO_MEMORY_COR)),
646 /*61*/  FLAG_ENTRY0("TxEgressFifoCorErr", SEES(TX_EGRESS_FIFO_COR)),
647 /*62*/  FLAG_ENTRY0("TxReadSdmaMemoryCsrUncErr",
648                 SEES(TX_READ_SDMA_MEMORY_CSR_UNC)),
649 /*63*/  FLAG_ENTRY0("TxReadPioMemoryCsrUncErr",
650                 SEES(TX_READ_PIO_MEMORY_CSR_UNC)),
651 };
652
653 /*
654  * TXE Egress Error Info flags
655  */
656 #define SEEI(text) SEND_EGRESS_ERR_INFO_##text##_ERR_SMASK
657 static struct flag_table egress_err_info_flags[] = {
658 /* 0*/  FLAG_ENTRY0("Reserved", 0ull),
659 /* 1*/  FLAG_ENTRY0("VLErr", SEEI(VL)),
660 /* 2*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
661 /* 3*/  FLAG_ENTRY0("JobKeyErr", SEEI(JOB_KEY)),
662 /* 4*/  FLAG_ENTRY0("PartitionKeyErr", SEEI(PARTITION_KEY)),
663 /* 5*/  FLAG_ENTRY0("SLIDErr", SEEI(SLID)),
664 /* 6*/  FLAG_ENTRY0("OpcodeErr", SEEI(OPCODE)),
665 /* 7*/  FLAG_ENTRY0("VLMappingErr", SEEI(VL_MAPPING)),
666 /* 8*/  FLAG_ENTRY0("RawErr", SEEI(RAW)),
667 /* 9*/  FLAG_ENTRY0("RawIPv6Err", SEEI(RAW_IPV6)),
668 /*10*/  FLAG_ENTRY0("GRHErr", SEEI(GRH)),
669 /*11*/  FLAG_ENTRY0("BypassErr", SEEI(BYPASS)),
670 /*12*/  FLAG_ENTRY0("KDETHPacketsErr", SEEI(KDETH_PACKETS)),
671 /*13*/  FLAG_ENTRY0("NonKDETHPacketsErr", SEEI(NON_KDETH_PACKETS)),
672 /*14*/  FLAG_ENTRY0("TooSmallIBPacketsErr", SEEI(TOO_SMALL_IB_PACKETS)),
673 /*15*/  FLAG_ENTRY0("TooSmallBypassPacketsErr", SEEI(TOO_SMALL_BYPASS_PACKETS)),
674 /*16*/  FLAG_ENTRY0("PbcTestErr", SEEI(PBC_TEST)),
675 /*17*/  FLAG_ENTRY0("BadPktLenErr", SEEI(BAD_PKT_LEN)),
676 /*18*/  FLAG_ENTRY0("TooLongIBPacketErr", SEEI(TOO_LONG_IB_PACKET)),
677 /*19*/  FLAG_ENTRY0("TooLongBypassPacketsErr", SEEI(TOO_LONG_BYPASS_PACKETS)),
678 /*20*/  FLAG_ENTRY0("PbcStaticRateControlErr", SEEI(PBC_STATIC_RATE_CONTROL)),
679 /*21*/  FLAG_ENTRY0("BypassBadPktLenErr", SEEI(BAD_PKT_LEN)),
680 };
681
682 /* TXE Egress errors that cause an SPC freeze */
683 #define ALL_TXE_EGRESS_FREEZE_ERR \
684         (SEES(TX_EGRESS_FIFO_UNDERRUN_OR_PARITY) \
685         | SEES(TX_PIO_LAUNCH_INTF_PARITY) \
686         | SEES(TX_SDMA_LAUNCH_INTF_PARITY) \
687         | SEES(TX_SBRD_CTL_STATE_MACHINE_PARITY) \
688         | SEES(TX_LAUNCH_CSR_PARITY) \
689         | SEES(TX_SBRD_CTL_CSR_PARITY) \
690         | SEES(TX_CONFIG_PARITY) \
691         | SEES(TX_LAUNCH_FIFO0_UNC_OR_PARITY) \
692         | SEES(TX_LAUNCH_FIFO1_UNC_OR_PARITY) \
693         | SEES(TX_LAUNCH_FIFO2_UNC_OR_PARITY) \
694         | SEES(TX_LAUNCH_FIFO3_UNC_OR_PARITY) \
695         | SEES(TX_LAUNCH_FIFO4_UNC_OR_PARITY) \
696         | SEES(TX_LAUNCH_FIFO5_UNC_OR_PARITY) \
697         | SEES(TX_LAUNCH_FIFO6_UNC_OR_PARITY) \
698         | SEES(TX_LAUNCH_FIFO7_UNC_OR_PARITY) \
699         | SEES(TX_LAUNCH_FIFO8_UNC_OR_PARITY) \
700         | SEES(TX_CREDIT_RETURN_PARITY))
701
702 /*
703  * TXE Send error flags
704  */
705 #define SES(name) SEND_ERR_STATUS_SEND_##name##_ERR_SMASK
706 static struct flag_table send_err_status_flags[] = {
707 /* 0*/  FLAG_ENTRY0("SendCsrParityErr", SES(CSR_PARITY)),
708 /* 1*/  FLAG_ENTRY0("SendCsrReadBadAddrErr", SES(CSR_READ_BAD_ADDR)),
709 /* 2*/  FLAG_ENTRY0("SendCsrWriteBadAddrErr", SES(CSR_WRITE_BAD_ADDR))
710 };
711
712 /*
713  * TXE Send Context Error flags and consequences
714  */
715 static struct flag_table sc_err_status_flags[] = {
716 /* 0*/  FLAG_ENTRY("InconsistentSop",
717                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
718                 SEND_CTXT_ERR_STATUS_PIO_INCONSISTENT_SOP_ERR_SMASK),
719 /* 1*/  FLAG_ENTRY("DisallowedPacket",
720                 SEC_PACKET_DROPPED | SEC_SC_HALTED,
721                 SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK),
722 /* 2*/  FLAG_ENTRY("WriteCrossesBoundary",
723                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
724                 SEND_CTXT_ERR_STATUS_PIO_WRITE_CROSSES_BOUNDARY_ERR_SMASK),
725 /* 3*/  FLAG_ENTRY("WriteOverflow",
726                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
727                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OVERFLOW_ERR_SMASK),
728 /* 4*/  FLAG_ENTRY("WriteOutOfBounds",
729                 SEC_WRITE_DROPPED | SEC_SC_HALTED,
730                 SEND_CTXT_ERR_STATUS_PIO_WRITE_OUT_OF_BOUNDS_ERR_SMASK),
731 /* 5-63 reserved*/
732 };
733
734 /*
735  * RXE Receive Error flags
736  */
737 #define RXES(name) RCV_ERR_STATUS_RX_##name##_ERR_SMASK
738 static struct flag_table rxe_err_status_flags[] = {
739 /* 0*/  FLAG_ENTRY0("RxDmaCsrCorErr", RXES(DMA_CSR_COR)),
740 /* 1*/  FLAG_ENTRY0("RxDcIntfParityErr", RXES(DC_INTF_PARITY)),
741 /* 2*/  FLAG_ENTRY0("RxRcvHdrUncErr", RXES(RCV_HDR_UNC)),
742 /* 3*/  FLAG_ENTRY0("RxRcvHdrCorErr", RXES(RCV_HDR_COR)),
743 /* 4*/  FLAG_ENTRY0("RxRcvDataUncErr", RXES(RCV_DATA_UNC)),
744 /* 5*/  FLAG_ENTRY0("RxRcvDataCorErr", RXES(RCV_DATA_COR)),
745 /* 6*/  FLAG_ENTRY0("RxRcvQpMapTableUncErr", RXES(RCV_QP_MAP_TABLE_UNC)),
746 /* 7*/  FLAG_ENTRY0("RxRcvQpMapTableCorErr", RXES(RCV_QP_MAP_TABLE_COR)),
747 /* 8*/  FLAG_ENTRY0("RxRcvCsrParityErr", RXES(RCV_CSR_PARITY)),
748 /* 9*/  FLAG_ENTRY0("RxDcSopEopParityErr", RXES(DC_SOP_EOP_PARITY)),
749 /*10*/  FLAG_ENTRY0("RxDmaFlagUncErr", RXES(DMA_FLAG_UNC)),
750 /*11*/  FLAG_ENTRY0("RxDmaFlagCorErr", RXES(DMA_FLAG_COR)),
751 /*12*/  FLAG_ENTRY0("RxRcvFsmEncodingErr", RXES(RCV_FSM_ENCODING)),
752 /*13*/  FLAG_ENTRY0("RxRbufFreeListUncErr", RXES(RBUF_FREE_LIST_UNC)),
753 /*14*/  FLAG_ENTRY0("RxRbufFreeListCorErr", RXES(RBUF_FREE_LIST_COR)),
754 /*15*/  FLAG_ENTRY0("RxRbufLookupDesRegUncErr", RXES(RBUF_LOOKUP_DES_REG_UNC)),
755 /*16*/  FLAG_ENTRY0("RxRbufLookupDesRegUncCorErr",
756                 RXES(RBUF_LOOKUP_DES_REG_UNC_COR)),
757 /*17*/  FLAG_ENTRY0("RxRbufLookupDesUncErr", RXES(RBUF_LOOKUP_DES_UNC)),
758 /*18*/  FLAG_ENTRY0("RxRbufLookupDesCorErr", RXES(RBUF_LOOKUP_DES_COR)),
759 /*19*/  FLAG_ENTRY0("RxRbufBlockListReadUncErr",
760                 RXES(RBUF_BLOCK_LIST_READ_UNC)),
761 /*20*/  FLAG_ENTRY0("RxRbufBlockListReadCorErr",
762                 RXES(RBUF_BLOCK_LIST_READ_COR)),
763 /*21*/  FLAG_ENTRY0("RxRbufCsrQHeadBufNumParityErr",
764                 RXES(RBUF_CSR_QHEAD_BUF_NUM_PARITY)),
765 /*22*/  FLAG_ENTRY0("RxRbufCsrQEntCntParityErr",
766                 RXES(RBUF_CSR_QENT_CNT_PARITY)),
767 /*23*/  FLAG_ENTRY0("RxRbufCsrQNextBufParityErr",
768                 RXES(RBUF_CSR_QNEXT_BUF_PARITY)),
769 /*24*/  FLAG_ENTRY0("RxRbufCsrQVldBitParityErr",
770                 RXES(RBUF_CSR_QVLD_BIT_PARITY)),
771 /*25*/  FLAG_ENTRY0("RxRbufCsrQHdPtrParityErr", RXES(RBUF_CSR_QHD_PTR_PARITY)),
772 /*26*/  FLAG_ENTRY0("RxRbufCsrQTlPtrParityErr", RXES(RBUF_CSR_QTL_PTR_PARITY)),
773 /*27*/  FLAG_ENTRY0("RxRbufCsrQNumOfPktParityErr",
774                 RXES(RBUF_CSR_QNUM_OF_PKT_PARITY)),
775 /*28*/  FLAG_ENTRY0("RxRbufCsrQEOPDWParityErr", RXES(RBUF_CSR_QEOPDW_PARITY)),
776 /*29*/  FLAG_ENTRY0("RxRbufCtxIdParityErr", RXES(RBUF_CTX_ID_PARITY)),
777 /*30*/  FLAG_ENTRY0("RxRBufBadLookupErr", RXES(RBUF_BAD_LOOKUP)),
778 /*31*/  FLAG_ENTRY0("RxRbufFullErr", RXES(RBUF_FULL)),
779 /*32*/  FLAG_ENTRY0("RxRbufEmptyErr", RXES(RBUF_EMPTY)),
780 /*33*/  FLAG_ENTRY0("RxRbufFlRdAddrParityErr", RXES(RBUF_FL_RD_ADDR_PARITY)),
781 /*34*/  FLAG_ENTRY0("RxRbufFlWrAddrParityErr", RXES(RBUF_FL_WR_ADDR_PARITY)),
782 /*35*/  FLAG_ENTRY0("RxRbufFlInitdoneParityErr",
783                 RXES(RBUF_FL_INITDONE_PARITY)),
784 /*36*/  FLAG_ENTRY0("RxRbufFlInitWrAddrParityErr",
785                 RXES(RBUF_FL_INIT_WR_ADDR_PARITY)),
786 /*37*/  FLAG_ENTRY0("RxRbufNextFreeBufUncErr", RXES(RBUF_NEXT_FREE_BUF_UNC)),
787 /*38*/  FLAG_ENTRY0("RxRbufNextFreeBufCorErr", RXES(RBUF_NEXT_FREE_BUF_COR)),
788 /*39*/  FLAG_ENTRY0("RxLookupDesPart1UncErr", RXES(LOOKUP_DES_PART1_UNC)),
789 /*40*/  FLAG_ENTRY0("RxLookupDesPart1UncCorErr",
790                 RXES(LOOKUP_DES_PART1_UNC_COR)),
791 /*41*/  FLAG_ENTRY0("RxLookupDesPart2ParityErr",
792                 RXES(LOOKUP_DES_PART2_PARITY)),
793 /*42*/  FLAG_ENTRY0("RxLookupRcvArrayUncErr", RXES(LOOKUP_RCV_ARRAY_UNC)),
794 /*43*/  FLAG_ENTRY0("RxLookupRcvArrayCorErr", RXES(LOOKUP_RCV_ARRAY_COR)),
795 /*44*/  FLAG_ENTRY0("RxLookupCsrParityErr", RXES(LOOKUP_CSR_PARITY)),
796 /*45*/  FLAG_ENTRY0("RxHqIntrCsrParityErr", RXES(HQ_INTR_CSR_PARITY)),
797 /*46*/  FLAG_ENTRY0("RxHqIntrFsmErr", RXES(HQ_INTR_FSM)),
798 /*47*/  FLAG_ENTRY0("RxRbufDescPart1UncErr", RXES(RBUF_DESC_PART1_UNC)),
799 /*48*/  FLAG_ENTRY0("RxRbufDescPart1CorErr", RXES(RBUF_DESC_PART1_COR)),
800 /*49*/  FLAG_ENTRY0("RxRbufDescPart2UncErr", RXES(RBUF_DESC_PART2_UNC)),
801 /*50*/  FLAG_ENTRY0("RxRbufDescPart2CorErr", RXES(RBUF_DESC_PART2_COR)),
802 /*51*/  FLAG_ENTRY0("RxDmaHdrFifoRdUncErr", RXES(DMA_HDR_FIFO_RD_UNC)),
803 /*52*/  FLAG_ENTRY0("RxDmaHdrFifoRdCorErr", RXES(DMA_HDR_FIFO_RD_COR)),
804 /*53*/  FLAG_ENTRY0("RxDmaDataFifoRdUncErr", RXES(DMA_DATA_FIFO_RD_UNC)),
805 /*54*/  FLAG_ENTRY0("RxDmaDataFifoRdCorErr", RXES(DMA_DATA_FIFO_RD_COR)),
806 /*55*/  FLAG_ENTRY0("RxRbufDataUncErr", RXES(RBUF_DATA_UNC)),
807 /*56*/  FLAG_ENTRY0("RxRbufDataCorErr", RXES(RBUF_DATA_COR)),
808 /*57*/  FLAG_ENTRY0("RxDmaCsrParityErr", RXES(DMA_CSR_PARITY)),
809 /*58*/  FLAG_ENTRY0("RxDmaEqFsmEncodingErr", RXES(DMA_EQ_FSM_ENCODING)),
810 /*59*/  FLAG_ENTRY0("RxDmaDqFsmEncodingErr", RXES(DMA_DQ_FSM_ENCODING)),
811 /*60*/  FLAG_ENTRY0("RxDmaCsrUncErr", RXES(DMA_CSR_UNC)),
812 /*61*/  FLAG_ENTRY0("RxCsrReadBadAddrErr", RXES(CSR_READ_BAD_ADDR)),
813 /*62*/  FLAG_ENTRY0("RxCsrWriteBadAddrErr", RXES(CSR_WRITE_BAD_ADDR)),
814 /*63*/  FLAG_ENTRY0("RxCsrParityErr", RXES(CSR_PARITY))
815 };
816
817 /* RXE errors that will trigger an SPC freeze */
818 #define ALL_RXE_FREEZE_ERR  \
819         (RCV_ERR_STATUS_RX_RCV_QP_MAP_TABLE_UNC_ERR_SMASK \
820         | RCV_ERR_STATUS_RX_RCV_CSR_PARITY_ERR_SMASK \
821         | RCV_ERR_STATUS_RX_DMA_FLAG_UNC_ERR_SMASK \
822         | RCV_ERR_STATUS_RX_RCV_FSM_ENCODING_ERR_SMASK \
823         | RCV_ERR_STATUS_RX_RBUF_FREE_LIST_UNC_ERR_SMASK \
824         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_ERR_SMASK \
825         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR_SMASK \
826         | RCV_ERR_STATUS_RX_RBUF_LOOKUP_DES_UNC_ERR_SMASK \
827         | RCV_ERR_STATUS_RX_RBUF_BLOCK_LIST_READ_UNC_ERR_SMASK \
828         | RCV_ERR_STATUS_RX_RBUF_CSR_QHEAD_BUF_NUM_PARITY_ERR_SMASK \
829         | RCV_ERR_STATUS_RX_RBUF_CSR_QENT_CNT_PARITY_ERR_SMASK \
830         | RCV_ERR_STATUS_RX_RBUF_CSR_QNEXT_BUF_PARITY_ERR_SMASK \
831         | RCV_ERR_STATUS_RX_RBUF_CSR_QVLD_BIT_PARITY_ERR_SMASK \
832         | RCV_ERR_STATUS_RX_RBUF_CSR_QHD_PTR_PARITY_ERR_SMASK \
833         | RCV_ERR_STATUS_RX_RBUF_CSR_QTL_PTR_PARITY_ERR_SMASK \
834         | RCV_ERR_STATUS_RX_RBUF_CSR_QNUM_OF_PKT_PARITY_ERR_SMASK \
835         | RCV_ERR_STATUS_RX_RBUF_CSR_QEOPDW_PARITY_ERR_SMASK \
836         | RCV_ERR_STATUS_RX_RBUF_CTX_ID_PARITY_ERR_SMASK \
837         | RCV_ERR_STATUS_RX_RBUF_BAD_LOOKUP_ERR_SMASK \
838         | RCV_ERR_STATUS_RX_RBUF_FULL_ERR_SMASK \
839         | RCV_ERR_STATUS_RX_RBUF_EMPTY_ERR_SMASK \
840         | RCV_ERR_STATUS_RX_RBUF_FL_RD_ADDR_PARITY_ERR_SMASK \
841         | RCV_ERR_STATUS_RX_RBUF_FL_WR_ADDR_PARITY_ERR_SMASK \
842         | RCV_ERR_STATUS_RX_RBUF_FL_INITDONE_PARITY_ERR_SMASK \
843         | RCV_ERR_STATUS_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR_SMASK \
844         | RCV_ERR_STATUS_RX_RBUF_NEXT_FREE_BUF_UNC_ERR_SMASK \
845         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_ERR_SMASK \
846         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART1_UNC_COR_ERR_SMASK \
847         | RCV_ERR_STATUS_RX_LOOKUP_DES_PART2_PARITY_ERR_SMASK \
848         | RCV_ERR_STATUS_RX_LOOKUP_RCV_ARRAY_UNC_ERR_SMASK \
849         | RCV_ERR_STATUS_RX_LOOKUP_CSR_PARITY_ERR_SMASK \
850         | RCV_ERR_STATUS_RX_HQ_INTR_CSR_PARITY_ERR_SMASK \
851         | RCV_ERR_STATUS_RX_HQ_INTR_FSM_ERR_SMASK \
852         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_UNC_ERR_SMASK \
853         | RCV_ERR_STATUS_RX_RBUF_DESC_PART1_COR_ERR_SMASK \
854         | RCV_ERR_STATUS_RX_RBUF_DESC_PART2_UNC_ERR_SMASK \
855         | RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK \
856         | RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK \
857         | RCV_ERR_STATUS_RX_RBUF_DATA_UNC_ERR_SMASK \
858         | RCV_ERR_STATUS_RX_DMA_CSR_PARITY_ERR_SMASK \
859         | RCV_ERR_STATUS_RX_DMA_EQ_FSM_ENCODING_ERR_SMASK \
860         | RCV_ERR_STATUS_RX_DMA_DQ_FSM_ENCODING_ERR_SMASK \
861         | RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK \
862         | RCV_ERR_STATUS_RX_CSR_PARITY_ERR_SMASK)
863
864 #define RXE_FREEZE_ABORT_MASK \
865         (RCV_ERR_STATUS_RX_DMA_CSR_UNC_ERR_SMASK | \
866         RCV_ERR_STATUS_RX_DMA_HDR_FIFO_RD_UNC_ERR_SMASK | \
867         RCV_ERR_STATUS_RX_DMA_DATA_FIFO_RD_UNC_ERR_SMASK)
868
869 /*
870  * DCC Error Flags
871  */
872 #define DCCE(name) DCC_ERR_FLG_##name##_SMASK
873 static struct flag_table dcc_err_flags[] = {
874         FLAG_ENTRY0("bad_l2_err", DCCE(BAD_L2_ERR)),
875         FLAG_ENTRY0("bad_sc_err", DCCE(BAD_SC_ERR)),
876         FLAG_ENTRY0("bad_mid_tail_err", DCCE(BAD_MID_TAIL_ERR)),
877         FLAG_ENTRY0("bad_preemption_err", DCCE(BAD_PREEMPTION_ERR)),
878         FLAG_ENTRY0("preemption_err", DCCE(PREEMPTION_ERR)),
879         FLAG_ENTRY0("preemptionvl15_err", DCCE(PREEMPTIONVL15_ERR)),
880         FLAG_ENTRY0("bad_vl_marker_err", DCCE(BAD_VL_MARKER_ERR)),
881         FLAG_ENTRY0("bad_dlid_target_err", DCCE(BAD_DLID_TARGET_ERR)),
882         FLAG_ENTRY0("bad_lver_err", DCCE(BAD_LVER_ERR)),
883         FLAG_ENTRY0("uncorrectable_err", DCCE(UNCORRECTABLE_ERR)),
884         FLAG_ENTRY0("bad_crdt_ack_err", DCCE(BAD_CRDT_ACK_ERR)),
885         FLAG_ENTRY0("unsup_pkt_type", DCCE(UNSUP_PKT_TYPE)),
886         FLAG_ENTRY0("bad_ctrl_flit_err", DCCE(BAD_CTRL_FLIT_ERR)),
887         FLAG_ENTRY0("event_cntr_parity_err", DCCE(EVENT_CNTR_PARITY_ERR)),
888         FLAG_ENTRY0("event_cntr_rollover_err", DCCE(EVENT_CNTR_ROLLOVER_ERR)),
889         FLAG_ENTRY0("link_err", DCCE(LINK_ERR)),
890         FLAG_ENTRY0("misc_cntr_rollover_err", DCCE(MISC_CNTR_ROLLOVER_ERR)),
891         FLAG_ENTRY0("bad_ctrl_dist_err", DCCE(BAD_CTRL_DIST_ERR)),
892         FLAG_ENTRY0("bad_tail_dist_err", DCCE(BAD_TAIL_DIST_ERR)),
893         FLAG_ENTRY0("bad_head_dist_err", DCCE(BAD_HEAD_DIST_ERR)),
894         FLAG_ENTRY0("nonvl15_state_err", DCCE(NONVL15_STATE_ERR)),
895         FLAG_ENTRY0("vl15_multi_err", DCCE(VL15_MULTI_ERR)),
896         FLAG_ENTRY0("bad_pkt_length_err", DCCE(BAD_PKT_LENGTH_ERR)),
897         FLAG_ENTRY0("unsup_vl_err", DCCE(UNSUP_VL_ERR)),
898         FLAG_ENTRY0("perm_nvl15_err", DCCE(PERM_NVL15_ERR)),
899         FLAG_ENTRY0("slid_zero_err", DCCE(SLID_ZERO_ERR)),
900         FLAG_ENTRY0("dlid_zero_err", DCCE(DLID_ZERO_ERR)),
901         FLAG_ENTRY0("length_mtu_err", DCCE(LENGTH_MTU_ERR)),
902         FLAG_ENTRY0("rx_early_drop_err", DCCE(RX_EARLY_DROP_ERR)),
903         FLAG_ENTRY0("late_short_err", DCCE(LATE_SHORT_ERR)),
904         FLAG_ENTRY0("late_long_err", DCCE(LATE_LONG_ERR)),
905         FLAG_ENTRY0("late_ebp_err", DCCE(LATE_EBP_ERR)),
906         FLAG_ENTRY0("fpe_tx_fifo_ovflw_err", DCCE(FPE_TX_FIFO_OVFLW_ERR)),
907         FLAG_ENTRY0("fpe_tx_fifo_unflw_err", DCCE(FPE_TX_FIFO_UNFLW_ERR)),
908         FLAG_ENTRY0("csr_access_blocked_host", DCCE(CSR_ACCESS_BLOCKED_HOST)),
909         FLAG_ENTRY0("csr_access_blocked_uc", DCCE(CSR_ACCESS_BLOCKED_UC)),
910         FLAG_ENTRY0("tx_ctrl_parity_err", DCCE(TX_CTRL_PARITY_ERR)),
911         FLAG_ENTRY0("tx_ctrl_parity_mbe_err", DCCE(TX_CTRL_PARITY_MBE_ERR)),
912         FLAG_ENTRY0("tx_sc_parity_err", DCCE(TX_SC_PARITY_ERR)),
913         FLAG_ENTRY0("rx_ctrl_parity_mbe_err", DCCE(RX_CTRL_PARITY_MBE_ERR)),
914         FLAG_ENTRY0("csr_parity_err", DCCE(CSR_PARITY_ERR)),
915         FLAG_ENTRY0("csr_inval_addr", DCCE(CSR_INVAL_ADDR)),
916         FLAG_ENTRY0("tx_byte_shft_parity_err", DCCE(TX_BYTE_SHFT_PARITY_ERR)),
917         FLAG_ENTRY0("rx_byte_shft_parity_err", DCCE(RX_BYTE_SHFT_PARITY_ERR)),
918         FLAG_ENTRY0("fmconfig_err", DCCE(FMCONFIG_ERR)),
919         FLAG_ENTRY0("rcvport_err", DCCE(RCVPORT_ERR)),
920 };
921
922 /*
923  * LCB error flags
924  */
925 #define LCBE(name) DC_LCB_ERR_FLG_##name##_SMASK
926 static struct flag_table lcb_err_flags[] = {
927 /* 0*/  FLAG_ENTRY0("CSR_PARITY_ERR", LCBE(CSR_PARITY_ERR)),
928 /* 1*/  FLAG_ENTRY0("INVALID_CSR_ADDR", LCBE(INVALID_CSR_ADDR)),
929 /* 2*/  FLAG_ENTRY0("RST_FOR_FAILED_DESKEW", LCBE(RST_FOR_FAILED_DESKEW)),
930 /* 3*/  FLAG_ENTRY0("ALL_LNS_FAILED_REINIT_TEST",
931                 LCBE(ALL_LNS_FAILED_REINIT_TEST)),
932 /* 4*/  FLAG_ENTRY0("LOST_REINIT_STALL_OR_TOS", LCBE(LOST_REINIT_STALL_OR_TOS)),
933 /* 5*/  FLAG_ENTRY0("TX_LESS_THAN_FOUR_LNS", LCBE(TX_LESS_THAN_FOUR_LNS)),
934 /* 6*/  FLAG_ENTRY0("RX_LESS_THAN_FOUR_LNS", LCBE(RX_LESS_THAN_FOUR_LNS)),
935 /* 7*/  FLAG_ENTRY0("SEQ_CRC_ERR", LCBE(SEQ_CRC_ERR)),
936 /* 8*/  FLAG_ENTRY0("REINIT_FROM_PEER", LCBE(REINIT_FROM_PEER)),
937 /* 9*/  FLAG_ENTRY0("REINIT_FOR_LN_DEGRADE", LCBE(REINIT_FOR_LN_DEGRADE)),
938 /*10*/  FLAG_ENTRY0("CRC_ERR_CNT_HIT_LIMIT", LCBE(CRC_ERR_CNT_HIT_LIMIT)),
939 /*11*/  FLAG_ENTRY0("RCLK_STOPPED", LCBE(RCLK_STOPPED)),
940 /*12*/  FLAG_ENTRY0("UNEXPECTED_REPLAY_MARKER", LCBE(UNEXPECTED_REPLAY_MARKER)),
941 /*13*/  FLAG_ENTRY0("UNEXPECTED_ROUND_TRIP_MARKER",
942                 LCBE(UNEXPECTED_ROUND_TRIP_MARKER)),
943 /*14*/  FLAG_ENTRY0("ILLEGAL_NULL_LTP", LCBE(ILLEGAL_NULL_LTP)),
944 /*15*/  FLAG_ENTRY0("ILLEGAL_FLIT_ENCODING", LCBE(ILLEGAL_FLIT_ENCODING)),
945 /*16*/  FLAG_ENTRY0("FLIT_INPUT_BUF_OFLW", LCBE(FLIT_INPUT_BUF_OFLW)),
946 /*17*/  FLAG_ENTRY0("VL_ACK_INPUT_BUF_OFLW", LCBE(VL_ACK_INPUT_BUF_OFLW)),
947 /*18*/  FLAG_ENTRY0("VL_ACK_INPUT_PARITY_ERR", LCBE(VL_ACK_INPUT_PARITY_ERR)),
948 /*19*/  FLAG_ENTRY0("VL_ACK_INPUT_WRONG_CRC_MODE",
949                 LCBE(VL_ACK_INPUT_WRONG_CRC_MODE)),
950 /*20*/  FLAG_ENTRY0("FLIT_INPUT_BUF_MBE", LCBE(FLIT_INPUT_BUF_MBE)),
951 /*21*/  FLAG_ENTRY0("FLIT_INPUT_BUF_SBE", LCBE(FLIT_INPUT_BUF_SBE)),
952 /*22*/  FLAG_ENTRY0("REPLAY_BUF_MBE", LCBE(REPLAY_BUF_MBE)),
953 /*23*/  FLAG_ENTRY0("REPLAY_BUF_SBE", LCBE(REPLAY_BUF_SBE)),
954 /*24*/  FLAG_ENTRY0("CREDIT_RETURN_FLIT_MBE", LCBE(CREDIT_RETURN_FLIT_MBE)),
955 /*25*/  FLAG_ENTRY0("RST_FOR_LINK_TIMEOUT", LCBE(RST_FOR_LINK_TIMEOUT)),
956 /*26*/  FLAG_ENTRY0("RST_FOR_INCOMPLT_RND_TRIP",
957                 LCBE(RST_FOR_INCOMPLT_RND_TRIP)),
958 /*27*/  FLAG_ENTRY0("HOLD_REINIT", LCBE(HOLD_REINIT)),
959 /*28*/  FLAG_ENTRY0("NEG_EDGE_LINK_TRANSFER_ACTIVE",
960                 LCBE(NEG_EDGE_LINK_TRANSFER_ACTIVE)),
961 /*29*/  FLAG_ENTRY0("REDUNDANT_FLIT_PARITY_ERR",
962                 LCBE(REDUNDANT_FLIT_PARITY_ERR))
963 };
964
965 /*
966  * DC8051 Error Flags
967  */
968 #define D8E(name) DC_DC8051_ERR_FLG_##name##_SMASK
969 static struct flag_table dc8051_err_flags[] = {
970         FLAG_ENTRY0("SET_BY_8051", D8E(SET_BY_8051)),
971         FLAG_ENTRY0("LOST_8051_HEART_BEAT", D8E(LOST_8051_HEART_BEAT)),
972         FLAG_ENTRY0("CRAM_MBE", D8E(CRAM_MBE)),
973         FLAG_ENTRY0("CRAM_SBE", D8E(CRAM_SBE)),
974         FLAG_ENTRY0("DRAM_MBE", D8E(DRAM_MBE)),
975         FLAG_ENTRY0("DRAM_SBE", D8E(DRAM_SBE)),
976         FLAG_ENTRY0("IRAM_MBE", D8E(IRAM_MBE)),
977         FLAG_ENTRY0("IRAM_SBE", D8E(IRAM_SBE)),
978         FLAG_ENTRY0("UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES",
979                     D8E(UNMATCHED_SECURE_MSG_ACROSS_BCC_LANES)),
980         FLAG_ENTRY0("INVALID_CSR_ADDR", D8E(INVALID_CSR_ADDR)),
981 };
982
983 /*
984  * DC8051 Information Error flags
985  *
986  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.ERROR field.
987  */
988 static struct flag_table dc8051_info_err_flags[] = {
989         FLAG_ENTRY0("Spico ROM check failed",  SPICO_ROM_FAILED),
990         FLAG_ENTRY0("Unknown frame received",  UNKNOWN_FRAME),
991         FLAG_ENTRY0("Target BER not met",      TARGET_BER_NOT_MET),
992         FLAG_ENTRY0("Serdes internal loopback failure",
993                     FAILED_SERDES_INTERNAL_LOOPBACK),
994         FLAG_ENTRY0("Failed SerDes init",      FAILED_SERDES_INIT),
995         FLAG_ENTRY0("Failed LNI(Polling)",     FAILED_LNI_POLLING),
996         FLAG_ENTRY0("Failed LNI(Debounce)",    FAILED_LNI_DEBOUNCE),
997         FLAG_ENTRY0("Failed LNI(EstbComm)",    FAILED_LNI_ESTBCOMM),
998         FLAG_ENTRY0("Failed LNI(OptEq)",       FAILED_LNI_OPTEQ),
999         FLAG_ENTRY0("Failed LNI(VerifyCap_1)", FAILED_LNI_VERIFY_CAP1),
1000         FLAG_ENTRY0("Failed LNI(VerifyCap_2)", FAILED_LNI_VERIFY_CAP2),
1001         FLAG_ENTRY0("Failed LNI(ConfigLT)",    FAILED_LNI_CONFIGLT),
1002         FLAG_ENTRY0("Host Handshake Timeout",  HOST_HANDSHAKE_TIMEOUT),
1003         FLAG_ENTRY0("External Device Request Timeout",
1004                     EXTERNAL_DEVICE_REQ_TIMEOUT),
1005 };
1006
1007 /*
1008  * DC8051 Information Host Information flags
1009  *
1010  * Flags in DC8051_DBG_ERR_INFO_SET_BY_8051.HOST_MSG field.
1011  */
1012 static struct flag_table dc8051_info_host_msg_flags[] = {
1013         FLAG_ENTRY0("Host request done", 0x0001),
1014         FLAG_ENTRY0("BC PWR_MGM message", 0x0002),
1015         FLAG_ENTRY0("BC SMA message", 0x0004),
1016         FLAG_ENTRY0("BC Unknown message (BCC)", 0x0008),
1017         FLAG_ENTRY0("BC Unknown message (LCB)", 0x0010),
1018         FLAG_ENTRY0("External device config request", 0x0020),
1019         FLAG_ENTRY0("VerifyCap all frames received", 0x0040),
1020         FLAG_ENTRY0("LinkUp achieved", 0x0080),
1021         FLAG_ENTRY0("Link going down", 0x0100),
1022         FLAG_ENTRY0("Link width downgraded", 0x0200),
1023 };
1024
1025 static u32 encoded_size(u32 size);
1026 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate);
1027 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state);
1028 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
1029                                u8 *continuous);
1030 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
1031                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes);
1032 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
1033                                       u8 *remote_tx_rate, u16 *link_widths);
1034 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
1035                                     u8 *flag_bits, u16 *link_widths);
1036 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
1037                                   u8 *device_rev);
1038 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
1039 static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
1040                             u8 *tx_polarity_inversion,
1041                             u8 *rx_polarity_inversion, u8 *max_rate);
1042 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
1043                                 unsigned int context, u64 err_status);
1044 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 source, u64 reg);
1045 static void handle_dcc_err(struct hfi1_devdata *dd,
1046                            unsigned int context, u64 err_status);
1047 static void handle_lcb_err(struct hfi1_devdata *dd,
1048                            unsigned int context, u64 err_status);
1049 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg);
1050 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1051 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1052 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1053 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1054 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1055 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1056 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg);
1057 static void set_partition_keys(struct hfi1_pportdata *ppd);
1058 static const char *link_state_name(u32 state);
1059 static const char *link_state_reason_name(struct hfi1_pportdata *ppd,
1060                                           u32 state);
1061 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
1062                            u64 *out_data);
1063 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1064 static int thermal_init(struct hfi1_devdata *dd);
1065
1066 static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1067 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1068                                             int msecs);
1069 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1070                                   int msecs);
1071 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
1072 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state);
1073 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1074                                    int msecs);
1075 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
1076                                          int msecs);
1077 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc);
1078 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr);
1079 static void handle_temp_err(struct hfi1_devdata *dd);
1080 static void dc_shutdown(struct hfi1_devdata *dd);
1081 static void dc_start(struct hfi1_devdata *dd);
1082 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
1083                            unsigned int *np);
1084 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
1085 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms);
1086 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index);
1087 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width);
1088
1089 /*
1090  * Error interrupt table entry.  This is used as input to the interrupt
1091  * "clear down" routine used for all second tier error interrupt register.
1092  * Second tier interrupt registers have a single bit representing them
1093  * in the top-level CceIntStatus.
1094  */
1095 struct err_reg_info {
1096         u32 status;             /* status CSR offset */
1097         u32 clear;              /* clear CSR offset */
1098         u32 mask;               /* mask CSR offset */
1099         void (*handler)(struct hfi1_devdata *dd, u32 source, u64 reg);
1100         const char *desc;
1101 };
1102
1103 #define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
1104 #define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
1105 #define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
1106
1107 /*
1108  * Helpers for building HFI and DC error interrupt table entries.  Different
1109  * helpers are needed because of inconsistent register names.
1110  */
1111 #define EE(reg, handler, desc) \
1112         { reg##_STATUS, reg##_CLEAR, reg##_MASK, \
1113                 handler, desc }
1114 #define DC_EE1(reg, handler, desc) \
1115         { reg##_FLG, reg##_FLG_CLR, reg##_FLG_EN, handler, desc }
1116 #define DC_EE2(reg, handler, desc) \
1117         { reg##_FLG, reg##_CLR, reg##_EN, handler, desc }
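
/*
 * For illustration (assuming the usual <reg>_STATUS/_CLEAR/_MASK CSR
 * naming), EE(CCE_ERR, handle_cce_err, "CceErr") expands by token
 * pasting to roughly:
 *   { CCE_ERR_STATUS, CCE_ERR_CLEAR, CCE_ERR_MASK,
 *     handle_cce_err, "CceErr" }
 * DC_EE1/DC_EE2 do the same for DC registers whose status/clear/enable
 * CSRs end in _FLG/_FLG_CLR/_FLG_EN and _FLG/_CLR/_EN respectively.
 */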
1118
1119 /*
1120  * Table of the "misc" grouping of error interrupts.  Each entry refers to
1121  * another register containing more information.
1122  */
1123 static const struct err_reg_info misc_errs[NUM_MISC_ERRS] = {
1124 /* 0*/  EE(CCE_ERR,         handle_cce_err,    "CceErr"),
1125 /* 1*/  EE(RCV_ERR,         handle_rxe_err,    "RxeErr"),
1126 /* 2*/  EE(MISC_ERR,        handle_misc_err,   "MiscErr"),
1127 /* 3*/  { 0, 0, 0, NULL }, /* reserved */
1128 /* 4*/  EE(SEND_PIO_ERR,    handle_pio_err,    "PioErr"),
1129 /* 5*/  EE(SEND_DMA_ERR,    handle_sdma_err,   "SDmaErr"),
1130 /* 6*/  EE(SEND_EGRESS_ERR, handle_egress_err, "EgressErr"),
1131 /* 7*/  EE(SEND_ERR,        handle_txe_err,    "TxeErr")
1132         /* the rest are reserved */
1133 };
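
/*
 * The table is sized by NUM_MISC_ERRS, i.e. one slot per interrupt
 * source in the IS_GENERAL_ERR_START..IS_GENERAL_ERR_END range;
 * { 0, 0, 0, NULL } slots are reserved sources with no second-tier
 * error register behind them.
 */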
1134
1135 /*
1136  * Index into the Various section of the interrupt sources
1137  * corresponding to the Critical Temperature interrupt.
1138  */
1139 #define TCRIT_INT_SOURCE 4
1140
1141 /*
1142  * SDMA error interrupt entry - refers to another register containing more
1143  * information.
1144  */
1145 static const struct err_reg_info sdma_eng_err =
1146         EE(SEND_DMA_ENG_ERR, handle_sdma_eng_err, "SDmaEngErr");
1147
1148 static const struct err_reg_info various_err[NUM_VARIOUS] = {
1149 /* 0*/  { 0, 0, 0, NULL }, /* PbcInt */
1150 /* 1*/  { 0, 0, 0, NULL }, /* GpioAssertInt */
1151 /* 2*/  EE(ASIC_QSFP1,  handle_qsfp_int,        "QSFP1"),
1152 /* 3*/  EE(ASIC_QSFP2,  handle_qsfp_int,        "QSFP2"),
1153 /* 4*/  { 0, 0, 0, NULL }, /* TCritInt */
1154         /* rest are reserved */
1155 };
1156
1157 /*
1158  * The DC encoding of mtu_cap for 10K MTU in the DCC_CFG_PORT_CONFIG
1159  * register cannot be derived from the MTU value because 10K is not
1160  * a power of 2. Therefore, we need a constant. Everything else can
1161  * be calculated.
1162  */
1163 #define DCC_CFG_PORT_MTU_CAP_10240 7
1164
1165 /*
1166  * Table of the DC grouping of error interrupts.  Each entry refers to
1167  * another register containing more information.
1168  */
1169 static const struct err_reg_info dc_errs[NUM_DC_ERRS] = {
1170 /* 0*/  DC_EE1(DCC_ERR,         handle_dcc_err,        "DCC Err"),
1171 /* 1*/  DC_EE2(DC_LCB_ERR,      handle_lcb_err,        "LCB Err"),
1172 /* 2*/  DC_EE2(DC_DC8051_ERR,   handle_8051_interrupt, "DC8051 Interrupt"),
1173 /* 3*/  /* dc_lbm_int - special, see is_dc_int() */
1174         /* the rest are reserved */
1175 };
1176
1177 struct cntr_entry {
1178         /*
1179          * counter name
1180          */
1181         char *name;
1182
1183         /*
1184          * csr to read for name (if applicable)
1185          */
1186         u64 csr;
1187
1188         /*
1189          * offset into dd or ppd to store the counter's value
1190          */
1191         int offset;
1192
1193         /*
1194          * flags
1195          */
1196         u8 flags;
1197
1198         /*
1199          * accessor for stat element, context either dd or ppd
1200          */
1201         u64 (*rw_cntr)(const struct cntr_entry *, void *context, int vl,
1202                        int mode, u64 data);
1203 };
1204
1205 #define C_RCV_HDR_OVF_FIRST C_RCV_HDR_OVF_0
1206 #define C_RCV_HDR_OVF_LAST C_RCV_HDR_OVF_159
1207
1208 #define CNTR_ELEM(name, csr, offset, flags, accessor) \
1209 { \
1210         name, \
1211         csr, \
1212         offset, \
1213         flags, \
1214         accessor \
1215 }
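
/*
 * The initializer is positional and must match the field order of
 * struct cntr_entry above: name, csr, offset, flags, rw_cntr.
 */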
1216
1217 /* 32bit RXE */
1218 #define RXE32_PORT_CNTR_ELEM(name, counter, flags) \
1219 CNTR_ELEM(#name, \
1220           (counter * 8 + RCV_COUNTER_ARRAY32), \
1221           0, flags | CNTR_32BIT, \
1222           port_access_u32_csr)
1223
1224 #define RXE32_DEV_CNTR_ELEM(name, counter, flags) \
1225 CNTR_ELEM(#name, \
1226           (counter * 8 + RCV_COUNTER_ARRAY32), \
1227           0, flags | CNTR_32BIT, \
1228           dev_access_u32_csr)
1229
1230 /* 64bit RXE */
1231 #define RXE64_PORT_CNTR_ELEM(name, counter, flags) \
1232 CNTR_ELEM(#name, \
1233           (counter * 8 + RCV_COUNTER_ARRAY64), \
1234           0, flags, \
1235           port_access_u64_csr)
1236
1237 #define RXE64_DEV_CNTR_ELEM(name, counter, flags) \
1238 CNTR_ELEM(#name, \
1239           (counter * 8 + RCV_COUNTER_ARRAY64), \
1240           0, flags, \
1241           dev_access_u64_csr)
1242
1243 #define OVR_LBL(ctx) C_RCV_HDR_OVF_ ## ctx
1244 #define OVR_ELM(ctx) \
1245 CNTR_ELEM("RcvHdrOvr" #ctx, \
1246           (RCV_HDR_OVFL_CNT + ctx * 0x100), \
1247           0, CNTR_NORMAL, port_access_u64_csr)
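
/*
 * Example: OVR_ELM(0) builds a "RcvHdrOvr0" entry whose CSR is
 * RCV_HDR_OVFL_CNT for receive context 0; each successive context's
 * overflow counter CSR is 0x100 bytes further on.
 */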
1248
1249 /* 32bit TXE */
1250 #define TXE32_PORT_CNTR_ELEM(name, counter, flags) \
1251 CNTR_ELEM(#name, \
1252           (counter * 8 + SEND_COUNTER_ARRAY32), \
1253           0, flags | CNTR_32BIT, \
1254           port_access_u32_csr)
1255
1256 /* 64bit TXE */
1257 #define TXE64_PORT_CNTR_ELEM(name, counter, flags) \
1258 CNTR_ELEM(#name, \
1259           (counter * 8 + SEND_COUNTER_ARRAY64), \
1260           0, flags, \
1261           port_access_u64_csr)
1262
1263 #define TX64_DEV_CNTR_ELEM(name, counter, flags) \
1264 CNTR_ELEM(#name, \
1265           (counter * 8 + SEND_COUNTER_ARRAY64), \
1266           0, \
1267           flags, \
1268           dev_access_u64_csr)
1269
1270 /* CCE */
1271 #define CCE_PERF_DEV_CNTR_ELEM(name, counter, flags) \
1272 CNTR_ELEM(#name, \
1273           (counter * 8 + CCE_COUNTER_ARRAY32), \
1274           0, flags | CNTR_32BIT, \
1275           dev_access_u32_csr)
1276
1277 #define CCE_INT_DEV_CNTR_ELEM(name, counter, flags) \
1278 CNTR_ELEM(#name, \
1279           (counter * 8 + CCE_INT_COUNTER_ARRAY32), \
1280           0, flags | CNTR_32BIT, \
1281           dev_access_u32_csr)
1282
1283 /* DC */
1284 #define DC_PERF_CNTR(name, counter, flags) \
1285 CNTR_ELEM(#name, \
1286           counter, \
1287           0, \
1288           flags, \
1289           dev_access_u64_csr)
1290
1291 #define DC_PERF_CNTR_LCB(name, counter, flags) \
1292 CNTR_ELEM(#name, \
1293           counter, \
1294           0, \
1295           flags, \
1296           dc_access_lcb_cntr)
1297
1298 /* ibp counters */
1299 #define SW_IBP_CNTR(name, cntr) \
1300 CNTR_ELEM(#name, \
1301           0, \
1302           0, \
1303           CNTR_SYNTH, \
1304           access_ibp_##cntr)
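
/*
 * SW_IBP_CNTR entries are synthetic (CNTR_SYNTH) and have no CSR; the
 * value is produced entirely by the pasted accessor, access_ibp_<cntr>.
 */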
1305
1306 /**
1307  * hfi1_addr_from_offset - return addr for readq/writeq
1308  * @dd - the dd device
1309  * @offset - the offset of the CSR within bar0
1310  *
1311  * This routine selects the appropriate base address
1312  * based on the indicated offset.
1313  */
1314 static inline void __iomem *hfi1_addr_from_offset(
1315         const struct hfi1_devdata *dd,
1316         u32 offset)
1317 {
1318         if (offset >= dd->base2_start)
1319                 return dd->kregbase2 + (offset - dd->base2_start);
1320         return dd->kregbase1 + offset;
1321 }
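
/*
 * The CSR space is mapped in two pieces: offsets below dd->base2_start
 * are reached through kregbase1, anything at or above it through
 * kregbase2 (with the base2_start bias removed).
 */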
1322
1323 /**
1324  * read_csr - read CSR at the indicated offset
1325  * @dd - the dd device
1326  * @offset - the offset of the CSR within bar0
1327  *
1328  * Return: the value read or all FF's if there
1329  * is no mapping
1330  */
1331 u64 read_csr(const struct hfi1_devdata *dd, u32 offset)
1332 {
1333         if (dd->flags & HFI1_PRESENT)
1334                 return readq(hfi1_addr_from_offset(dd, offset));
1335         return -1;
1336 }
1337
1338 /**
1339  * write_csr - write CSR at the indicated offset
1340  * @dd - the dd device
1341  * @offset - the offset of the CSR within bar0
1342  * @value - value to write
1343  */
1344 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value)
1345 {
1346         if (dd->flags & HFI1_PRESENT) {
1347                 void __iomem *base = hfi1_addr_from_offset(dd, offset);
1348
1349                 /* avoid write to RcvArray */
1350                 if (WARN_ON(offset >= RCV_ARRAY && offset < dd->base2_start))
1351                         return;
1352                 writeq(value, base);
1353         }
1354 }
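
/*
 * Note: offsets in [RCV_ARRAY, base2_start) cover the RcvArray and are
 * deliberately rejected here with a WARN_ON; RcvArray entries are not
 * written through this helper.
 */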
1355
1356 /**
1357  * get_csr_addr - return the iomem address for offset
1358  * @dd - the dd device
1359  * @offset - the offset of the CSR within bar0
1360  *
1361  * Return: The iomem address to use in subsequent
1362  * writeq/readq operations.
1363  */
1364 void __iomem *get_csr_addr(
1365         const struct hfi1_devdata *dd,
1366         u32 offset)
1367 {
1368         if (dd->flags & HFI1_PRESENT)
1369                 return hfi1_addr_from_offset(dd, offset);
1370         return NULL;
1371 }
1372
1373 static inline u64 read_write_csr(const struct hfi1_devdata *dd, u32 csr,
1374                                  int mode, u64 value)
1375 {
1376         u64 ret;
1377
1378         if (mode == CNTR_MODE_R) {
1379                 ret = read_csr(dd, csr);
1380         } else if (mode == CNTR_MODE_W) {
1381                 write_csr(dd, csr, value);
1382                 ret = value;
1383         } else {
1384                 dd_dev_err(dd, "Invalid cntr register access mode");
1385                 return 0;
1386         }
1387
1388         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, ret, mode);
1389         return ret;
1390 }
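
/*
 * read_write_csr() is the common dispatch used by the counter accessors
 * below: CNTR_MODE_R reads the CSR, CNTR_MODE_W writes the supplied
 * value (typically 0, to clear a counter) and echoes it back.
 */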
1391
1392 /* Dev Access */
1393 static u64 dev_access_u32_csr(const struct cntr_entry *entry,
1394                               void *context, int vl, int mode, u64 data)
1395 {
1396         struct hfi1_devdata *dd = context;
1397         u64 csr = entry->csr;
1398
1399         if (entry->flags & CNTR_SDMA) {
1400                 if (vl == CNTR_INVALID_VL)
1401                         return 0;
1402                 csr += 0x100 * vl;
1403         } else {
1404                 if (vl != CNTR_INVALID_VL)
1405                         return 0;
1406         }
1407         return read_write_csr(dd, csr, mode, data);
1408 }
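
/*
 * For CNTR_SDMA entries the vl argument doubles as the SDMA engine
 * index (the per-engine CSR blocks are 0x100 bytes apart); all other
 * device counters only accept vl == CNTR_INVALID_VL.
 */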
1409
1410 static u64 access_sde_err_cnt(const struct cntr_entry *entry,
1411                               void *context, int idx, int mode, u64 data)
1412 {
1413         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1414
1415         if (dd->per_sdma && idx < dd->num_sdma)
1416                 return dd->per_sdma[idx].err_cnt;
1417         return 0;
1418 }
1419
1420 static u64 access_sde_int_cnt(const struct cntr_entry *entry,
1421                               void *context, int idx, int mode, u64 data)
1422 {
1423         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1424
1425         if (dd->per_sdma && idx < dd->num_sdma)
1426                 return dd->per_sdma[idx].sdma_int_cnt;
1427         return 0;
1428 }
1429
1430 static u64 access_sde_idle_int_cnt(const struct cntr_entry *entry,
1431                                    void *context, int idx, int mode, u64 data)
1432 {
1433         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1434
1435         if (dd->per_sdma && idx < dd->num_sdma)
1436                 return dd->per_sdma[idx].idle_int_cnt;
1437         return 0;
1438 }
1439
1440 static u64 access_sde_progress_int_cnt(const struct cntr_entry *entry,
1441                                        void *context, int idx, int mode,
1442                                        u64 data)
1443 {
1444         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1445
1446         if (dd->per_sdma && idx < dd->num_sdma)
1447                 return dd->per_sdma[idx].progress_int_cnt;
1448         return 0;
1449 }
1450
1451 static u64 dev_access_u64_csr(const struct cntr_entry *entry, void *context,
1452                               int vl, int mode, u64 data)
1453 {
1454         struct hfi1_devdata *dd = context;
1455
1456         u64 val = 0;
1457         u64 csr = entry->csr;
1458
1459         if (entry->flags & CNTR_VL) {
1460                 if (vl == CNTR_INVALID_VL)
1461                         return 0;
1462                 csr += 8 * vl;
1463         } else {
1464                 if (vl != CNTR_INVALID_VL)
1465                         return 0;
1466         }
1467
1468         val = read_write_csr(dd, csr, mode, data);
1469         return val;
1470 }
1471
1472 static u64 dc_access_lcb_cntr(const struct cntr_entry *entry, void *context,
1473                               int vl, int mode, u64 data)
1474 {
1475         struct hfi1_devdata *dd = context;
1476         u32 csr = entry->csr;
1477         int ret = 0;
1478
1479         if (vl != CNTR_INVALID_VL)
1480                 return 0;
1481         if (mode == CNTR_MODE_R)
1482                 ret = read_lcb_csr(dd, csr, &data);
1483         else if (mode == CNTR_MODE_W)
1484                 ret = write_lcb_csr(dd, csr, data);
1485
1486         if (ret) {
1487                 dd_dev_err(dd, "Could not acquire LCB for counter 0x%x", csr);
1488                 return 0;
1489         }
1490
1491         hfi1_cdbg(CNTR, "csr 0x%x val 0x%llx mode %d", csr, data, mode);
1492         return data;
1493 }
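
/*
 * LCB CSRs cannot always be accessed directly; read_lcb_csr() and
 * write_lcb_csr() may fail when the LCB is unavailable, in which case
 * the counter is simply reported as 0.
 */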
1494
1495 /* Port Access */
1496 static u64 port_access_u32_csr(const struct cntr_entry *entry, void *context,
1497                                int vl, int mode, u64 data)
1498 {
1499         struct hfi1_pportdata *ppd = context;
1500
1501         if (vl != CNTR_INVALID_VL)
1502                 return 0;
1503         return read_write_csr(ppd->dd, entry->csr, mode, data);
1504 }
1505
1506 static u64 port_access_u64_csr(const struct cntr_entry *entry,
1507                                void *context, int vl, int mode, u64 data)
1508 {
1509         struct hfi1_pportdata *ppd = context;
1510         u64 val;
1511         u64 csr = entry->csr;
1512
1513         if (entry->flags & CNTR_VL) {
1514                 if (vl == CNTR_INVALID_VL)
1515                         return 0;
1516                 csr += 8 * vl;
1517         } else {
1518                 if (vl != CNTR_INVALID_VL)
1519                         return 0;
1520         }
1521         val = read_write_csr(ppd->dd, csr, mode, data);
1522         return val;
1523 }
1524
1525 /* Software defined */
1526 static inline u64 read_write_sw(struct hfi1_devdata *dd, u64 *cntr, int mode,
1527                                 u64 data)
1528 {
1529         u64 ret;
1530
1531         if (mode == CNTR_MODE_R) {
1532                 ret = *cntr;
1533         } else if (mode == CNTR_MODE_W) {
1534                 *cntr = data;
1535                 ret = data;
1536         } else {
1537                 dd_dev_err(dd, "Invalid cntr sw access mode");
1538                 return 0;
1539         }
1540
1541         hfi1_cdbg(CNTR, "val 0x%llx mode %d", ret, mode);
1542
1543         return ret;
1544 }
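
/*
 * Unlike the per-CPU counters further down, these software counters are
 * plain u64 fields in dd/ppd and a write stores the value directly.
 */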
1545
1546 static u64 access_sw_link_dn_cnt(const struct cntr_entry *entry, void *context,
1547                                  int vl, int mode, u64 data)
1548 {
1549         struct hfi1_pportdata *ppd = context;
1550
1551         if (vl != CNTR_INVALID_VL)
1552                 return 0;
1553         return read_write_sw(ppd->dd, &ppd->link_downed, mode, data);
1554 }
1555
1556 static u64 access_sw_link_up_cnt(const struct cntr_entry *entry, void *context,
1557                                  int vl, int mode, u64 data)
1558 {
1559         struct hfi1_pportdata *ppd = context;
1560
1561         if (vl != CNTR_INVALID_VL)
1562                 return 0;
1563         return read_write_sw(ppd->dd, &ppd->link_up, mode, data);
1564 }
1565
1566 static u64 access_sw_unknown_frame_cnt(const struct cntr_entry *entry,
1567                                        void *context, int vl, int mode,
1568                                        u64 data)
1569 {
1570         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1571
1572         if (vl != CNTR_INVALID_VL)
1573                 return 0;
1574         return read_write_sw(ppd->dd, &ppd->unknown_frame_count, mode, data);
1575 }
1576
1577 static u64 access_sw_xmit_discards(const struct cntr_entry *entry,
1578                                    void *context, int vl, int mode, u64 data)
1579 {
1580         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
1581         u64 zero = 0;
1582         u64 *counter;
1583
1584         if (vl == CNTR_INVALID_VL)
1585                 counter = &ppd->port_xmit_discards;
1586         else if (vl >= 0 && vl < C_VL_COUNT)
1587                 counter = &ppd->port_xmit_discards_vl[vl];
1588         else
1589                 counter = &zero;
1590
1591         return read_write_sw(ppd->dd, counter, mode, data);
1592 }
1593
1594 static u64 access_xmit_constraint_errs(const struct cntr_entry *entry,
1595                                        void *context, int vl, int mode,
1596                                        u64 data)
1597 {
1598         struct hfi1_pportdata *ppd = context;
1599
1600         if (vl != CNTR_INVALID_VL)
1601                 return 0;
1602
1603         return read_write_sw(ppd->dd, &ppd->port_xmit_constraint_errors,
1604                              mode, data);
1605 }
1606
1607 static u64 access_rcv_constraint_errs(const struct cntr_entry *entry,
1608                                       void *context, int vl, int mode, u64 data)
1609 {
1610         struct hfi1_pportdata *ppd = context;
1611
1612         if (vl != CNTR_INVALID_VL)
1613                 return 0;
1614
1615         return read_write_sw(ppd->dd, &ppd->port_rcv_constraint_errors,
1616                              mode, data);
1617 }
1618
1619 u64 get_all_cpu_total(u64 __percpu *cntr)
1620 {
1621         int cpu;
1622         u64 counter = 0;
1623
1624         for_each_possible_cpu(cpu)
1625                 counter += *per_cpu_ptr(cntr, cpu);
1626         return counter;
1627 }
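
/*
 * Summing over all *possible* CPUs (not just online ones) keeps the
 * contributions of CPUs that have since been hot-unplugged.
 */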
1628
1629 static u64 read_write_cpu(struct hfi1_devdata *dd, u64 *z_val,
1630                           u64 __percpu *cntr,
1631                           int vl, int mode, u64 data)
1632 {
1633         u64 ret = 0;
1634
1635         if (vl != CNTR_INVALID_VL)
1636                 return 0;
1637
1638         if (mode == CNTR_MODE_R) {
1639                 ret = get_all_cpu_total(cntr) - *z_val;
1640         } else if (mode == CNTR_MODE_W) {
1641                 /* A write can only zero the counter */
1642                 if (data == 0)
1643                         *z_val = get_all_cpu_total(cntr);
1644                 else
1645                         dd_dev_err(dd, "Per CPU cntrs can only be zeroed");
1646         } else {
1647                 dd_dev_err(dd, "Invalid cntr sw cpu access mode");
1648                 return 0;
1649         }
1650
1651         return ret;
1652 }
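
/*
 * Per-CPU counters are never reset directly.  Instead *z_val holds a
 * zero-point: a read returns the running total minus *z_val, and a
 * write of 0 moves *z_val up to the current total so the counter reads
 * as zero from then on.  Any other write value is rejected.
 */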
1653
1654 static u64 access_sw_cpu_intr(const struct cntr_entry *entry,
1655                               void *context, int vl, int mode, u64 data)
1656 {
1657         struct hfi1_devdata *dd = context;
1658
1659         return read_write_cpu(dd, &dd->z_int_counter, dd->int_counter, vl,
1660                               mode, data);
1661 }
1662
1663 static u64 access_sw_cpu_rcv_limit(const struct cntr_entry *entry,
1664                                    void *context, int vl, int mode, u64 data)
1665 {
1666         struct hfi1_devdata *dd = context;
1667
1668         return read_write_cpu(dd, &dd->z_rcv_limit, dd->rcv_limit, vl,
1669                               mode, data);
1670 }
1671
1672 static u64 access_sw_pio_wait(const struct cntr_entry *entry,
1673                               void *context, int vl, int mode, u64 data)
1674 {
1675         struct hfi1_devdata *dd = context;
1676
1677         return dd->verbs_dev.n_piowait;
1678 }
1679
1680 static u64 access_sw_pio_drain(const struct cntr_entry *entry,
1681                                void *context, int vl, int mode, u64 data)
1682 {
1683         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1684
1685         return dd->verbs_dev.n_piodrain;
1686 }
1687
1688 static u64 access_sw_vtx_wait(const struct cntr_entry *entry,
1689                               void *context, int vl, int mode, u64 data)
1690 {
1691         struct hfi1_devdata *dd = context;
1692
1693         return dd->verbs_dev.n_txwait;
1694 }
1695
1696 static u64 access_sw_kmem_wait(const struct cntr_entry *entry,
1697                                void *context, int vl, int mode, u64 data)
1698 {
1699         struct hfi1_devdata *dd = context;
1700
1701         return dd->verbs_dev.n_kmem_wait;
1702 }
1703
1704 static u64 access_sw_send_schedule(const struct cntr_entry *entry,
1705                                    void *context, int vl, int mode, u64 data)
1706 {
1707         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1708
1709         return read_write_cpu(dd, &dd->z_send_schedule, dd->send_schedule, vl,
1710                               mode, data);
1711 }
1712
1713 /* Software counters for the error status bits within MISC_ERR_STATUS */
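/*
 * Each accessor below returns one element of dd->misc_err_status_cnt[];
 * the array index corresponds to the bit position of the error within
 * MISC_ERR_STATUS, so the accessors run from bit 12 down to bit 0.
 */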
1714 static u64 access_misc_pll_lock_fail_err_cnt(const struct cntr_entry *entry,
1715                                              void *context, int vl, int mode,
1716                                              u64 data)
1717 {
1718         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1719
1720         return dd->misc_err_status_cnt[12];
1721 }
1722
1723 static u64 access_misc_mbist_fail_err_cnt(const struct cntr_entry *entry,
1724                                           void *context, int vl, int mode,
1725                                           u64 data)
1726 {
1727         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1728
1729         return dd->misc_err_status_cnt[11];
1730 }
1731
1732 static u64 access_misc_invalid_eep_cmd_err_cnt(const struct cntr_entry *entry,
1733                                                void *context, int vl, int mode,
1734                                                u64 data)
1735 {
1736         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1737
1738         return dd->misc_err_status_cnt[10];
1739 }
1740
1741 static u64 access_misc_efuse_done_parity_err_cnt(const struct cntr_entry *entry,
1742                                                  void *context, int vl,
1743                                                  int mode, u64 data)
1744 {
1745         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1746
1747         return dd->misc_err_status_cnt[9];
1748 }
1749
1750 static u64 access_misc_efuse_write_err_cnt(const struct cntr_entry *entry,
1751                                            void *context, int vl, int mode,
1752                                            u64 data)
1753 {
1754         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1755
1756         return dd->misc_err_status_cnt[8];
1757 }
1758
1759 static u64 access_misc_efuse_read_bad_addr_err_cnt(
1760                                 const struct cntr_entry *entry,
1761                                 void *context, int vl, int mode, u64 data)
1762 {
1763         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1764
1765         return dd->misc_err_status_cnt[7];
1766 }
1767
1768 static u64 access_misc_efuse_csr_parity_err_cnt(const struct cntr_entry *entry,
1769                                                 void *context, int vl,
1770                                                 int mode, u64 data)
1771 {
1772         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1773
1774         return dd->misc_err_status_cnt[6];
1775 }
1776
1777 static u64 access_misc_fw_auth_failed_err_cnt(const struct cntr_entry *entry,
1778                                               void *context, int vl, int mode,
1779                                               u64 data)
1780 {
1781         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1782
1783         return dd->misc_err_status_cnt[5];
1784 }
1785
1786 static u64 access_misc_key_mismatch_err_cnt(const struct cntr_entry *entry,
1787                                             void *context, int vl, int mode,
1788                                             u64 data)
1789 {
1790         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1791
1792         return dd->misc_err_status_cnt[4];
1793 }
1794
1795 static u64 access_misc_sbus_write_failed_err_cnt(const struct cntr_entry *entry,
1796                                                  void *context, int vl,
1797                                                  int mode, u64 data)
1798 {
1799         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1800
1801         return dd->misc_err_status_cnt[3];
1802 }
1803
1804 static u64 access_misc_csr_write_bad_addr_err_cnt(
1805                                 const struct cntr_entry *entry,
1806                                 void *context, int vl, int mode, u64 data)
1807 {
1808         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1809
1810         return dd->misc_err_status_cnt[2];
1811 }
1812
1813 static u64 access_misc_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1814                                                  void *context, int vl,
1815                                                  int mode, u64 data)
1816 {
1817         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1818
1819         return dd->misc_err_status_cnt[1];
1820 }
1821
1822 static u64 access_misc_csr_parity_err_cnt(const struct cntr_entry *entry,
1823                                           void *context, int vl, int mode,
1824                                           u64 data)
1825 {
1826         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1827
1828         return dd->misc_err_status_cnt[0];
1829 }
1830
1831 /*
1832  * Software counter for the aggregate of
1833  * individual CceErrStatus counters
1834  */
1835 static u64 access_sw_cce_err_status_aggregated_cnt(
1836                                 const struct cntr_entry *entry,
1837                                 void *context, int vl, int mode, u64 data)
1838 {
1839         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1840
1841         return dd->sw_cce_err_status_aggregate;
1842 }
1843
1844 /*
1845  * Software counters corresponding to each of the
1846  * error status bits within CceErrStatus
1847  */
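/*
 * As with the MISC bank, cce_err_status_cnt[] is indexed by bit
 * position in CceErrStatus; the accessors below cover bits 40 down
 * to 0.
 */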
1848 static u64 access_cce_msix_csr_parity_err_cnt(const struct cntr_entry *entry,
1849                                               void *context, int vl, int mode,
1850                                               u64 data)
1851 {
1852         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1853
1854         return dd->cce_err_status_cnt[40];
1855 }
1856
1857 static u64 access_cce_int_map_unc_err_cnt(const struct cntr_entry *entry,
1858                                           void *context, int vl, int mode,
1859                                           u64 data)
1860 {
1861         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1862
1863         return dd->cce_err_status_cnt[39];
1864 }
1865
1866 static u64 access_cce_int_map_cor_err_cnt(const struct cntr_entry *entry,
1867                                           void *context, int vl, int mode,
1868                                           u64 data)
1869 {
1870         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1871
1872         return dd->cce_err_status_cnt[38];
1873 }
1874
1875 static u64 access_cce_msix_table_unc_err_cnt(const struct cntr_entry *entry,
1876                                              void *context, int vl, int mode,
1877                                              u64 data)
1878 {
1879         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1880
1881         return dd->cce_err_status_cnt[37];
1882 }
1883
1884 static u64 access_cce_msix_table_cor_err_cnt(const struct cntr_entry *entry,
1885                                              void *context, int vl, int mode,
1886                                              u64 data)
1887 {
1888         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1889
1890         return dd->cce_err_status_cnt[36];
1891 }
1892
1893 static u64 access_cce_rxdma_conv_fifo_parity_err_cnt(
1894                                 const struct cntr_entry *entry,
1895                                 void *context, int vl, int mode, u64 data)
1896 {
1897         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1898
1899         return dd->cce_err_status_cnt[35];
1900 }
1901
1902 static u64 access_cce_rcpl_async_fifo_parity_err_cnt(
1903                                 const struct cntr_entry *entry,
1904                                 void *context, int vl, int mode, u64 data)
1905 {
1906         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1907
1908         return dd->cce_err_status_cnt[34];
1909 }
1910
1911 static u64 access_cce_seg_write_bad_addr_err_cnt(const struct cntr_entry *entry,
1912                                                  void *context, int vl,
1913                                                  int mode, u64 data)
1914 {
1915         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1916
1917         return dd->cce_err_status_cnt[33];
1918 }
1919
1920 static u64 access_cce_seg_read_bad_addr_err_cnt(const struct cntr_entry *entry,
1921                                                 void *context, int vl, int mode,
1922                                                 u64 data)
1923 {
1924         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1925
1926         return dd->cce_err_status_cnt[32];
1927 }
1928
1929 static u64 access_la_triggered_cnt(const struct cntr_entry *entry,
1930                                    void *context, int vl, int mode, u64 data)
1931 {
1932         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1933
1934         return dd->cce_err_status_cnt[31];
1935 }
1936
1937 static u64 access_cce_trgt_cpl_timeout_err_cnt(const struct cntr_entry *entry,
1938                                                void *context, int vl, int mode,
1939                                                u64 data)
1940 {
1941         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1942
1943         return dd->cce_err_status_cnt[30];
1944 }
1945
1946 static u64 access_pcic_receive_parity_err_cnt(const struct cntr_entry *entry,
1947                                               void *context, int vl, int mode,
1948                                               u64 data)
1949 {
1950         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1951
1952         return dd->cce_err_status_cnt[29];
1953 }
1954
1955 static u64 access_pcic_transmit_back_parity_err_cnt(
1956                                 const struct cntr_entry *entry,
1957                                 void *context, int vl, int mode, u64 data)
1958 {
1959         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1960
1961         return dd->cce_err_status_cnt[28];
1962 }
1963
1964 static u64 access_pcic_transmit_front_parity_err_cnt(
1965                                 const struct cntr_entry *entry,
1966                                 void *context, int vl, int mode, u64 data)
1967 {
1968         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1969
1970         return dd->cce_err_status_cnt[27];
1971 }
1972
1973 static u64 access_pcic_cpl_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1974                                              void *context, int vl, int mode,
1975                                              u64 data)
1976 {
1977         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1978
1979         return dd->cce_err_status_cnt[26];
1980 }
1981
1982 static u64 access_pcic_cpl_hd_q_unc_err_cnt(const struct cntr_entry *entry,
1983                                             void *context, int vl, int mode,
1984                                             u64 data)
1985 {
1986         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1987
1988         return dd->cce_err_status_cnt[25];
1989 }
1990
1991 static u64 access_pcic_post_dat_q_unc_err_cnt(const struct cntr_entry *entry,
1992                                               void *context, int vl, int mode,
1993                                               u64 data)
1994 {
1995         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
1996
1997         return dd->cce_err_status_cnt[24];
1998 }
1999
2000 static u64 access_pcic_post_hd_q_unc_err_cnt(const struct cntr_entry *entry,
2001                                              void *context, int vl, int mode,
2002                                              u64 data)
2003 {
2004         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2005
2006         return dd->cce_err_status_cnt[23];
2007 }
2008
2009 static u64 access_pcic_retry_sot_mem_unc_err_cnt(const struct cntr_entry *entry,
2010                                                  void *context, int vl,
2011                                                  int mode, u64 data)
2012 {
2013         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2014
2015         return dd->cce_err_status_cnt[22];
2016 }
2017
2018 static u64 access_pcic_retry_mem_unc_err(const struct cntr_entry *entry,
2019                                          void *context, int vl, int mode,
2020                                          u64 data)
2021 {
2022         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2023
2024         return dd->cce_err_status_cnt[21];
2025 }
2026
2027 static u64 access_pcic_n_post_dat_q_parity_err_cnt(
2028                                 const struct cntr_entry *entry,
2029                                 void *context, int vl, int mode, u64 data)
2030 {
2031         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2032
2033         return dd->cce_err_status_cnt[20];
2034 }
2035
2036 static u64 access_pcic_n_post_h_q_parity_err_cnt(const struct cntr_entry *entry,
2037                                                  void *context, int vl,
2038                                                  int mode, u64 data)
2039 {
2040         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2041
2042         return dd->cce_err_status_cnt[19];
2043 }
2044
2045 static u64 access_pcic_cpl_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2046                                              void *context, int vl, int mode,
2047                                              u64 data)
2048 {
2049         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2050
2051         return dd->cce_err_status_cnt[18];
2052 }
2053
2054 static u64 access_pcic_cpl_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2055                                             void *context, int vl, int mode,
2056                                             u64 data)
2057 {
2058         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2059
2060         return dd->cce_err_status_cnt[17];
2061 }
2062
2063 static u64 access_pcic_post_dat_q_cor_err_cnt(const struct cntr_entry *entry,
2064                                               void *context, int vl, int mode,
2065                                               u64 data)
2066 {
2067         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2068
2069         return dd->cce_err_status_cnt[16];
2070 }
2071
2072 static u64 access_pcic_post_hd_q_cor_err_cnt(const struct cntr_entry *entry,
2073                                              void *context, int vl, int mode,
2074                                              u64 data)
2075 {
2076         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2077
2078         return dd->cce_err_status_cnt[15];
2079 }
2080
2081 static u64 access_pcic_retry_sot_mem_cor_err_cnt(const struct cntr_entry *entry,
2082                                                  void *context, int vl,
2083                                                  int mode, u64 data)
2084 {
2085         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2086
2087         return dd->cce_err_status_cnt[14];
2088 }
2089
2090 static u64 access_pcic_retry_mem_cor_err_cnt(const struct cntr_entry *entry,
2091                                              void *context, int vl, int mode,
2092                                              u64 data)
2093 {
2094         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2095
2096         return dd->cce_err_status_cnt[13];
2097 }
2098
2099 static u64 access_cce_cli1_async_fifo_dbg_parity_err_cnt(
2100                                 const struct cntr_entry *entry,
2101                                 void *context, int vl, int mode, u64 data)
2102 {
2103         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2104
2105         return dd->cce_err_status_cnt[12];
2106 }
2107
2108 static u64 access_cce_cli1_async_fifo_rxdma_parity_err_cnt(
2109                                 const struct cntr_entry *entry,
2110                                 void *context, int vl, int mode, u64 data)
2111 {
2112         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2113
2114         return dd->cce_err_status_cnt[11];
2115 }
2116
2117 static u64 access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt(
2118                                 const struct cntr_entry *entry,
2119                                 void *context, int vl, int mode, u64 data)
2120 {
2121         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2122
2123         return dd->cce_err_status_cnt[10];
2124 }
2125
2126 static u64 access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt(
2127                                 const struct cntr_entry *entry,
2128                                 void *context, int vl, int mode, u64 data)
2129 {
2130         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2131
2132         return dd->cce_err_status_cnt[9];
2133 }
2134
2135 static u64 access_cce_cli2_async_fifo_parity_err_cnt(
2136                                 const struct cntr_entry *entry,
2137                                 void *context, int vl, int mode, u64 data)
2138 {
2139         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2140
2141         return dd->cce_err_status_cnt[8];
2142 }
2143
2144 static u64 access_cce_csr_cfg_bus_parity_err_cnt(const struct cntr_entry *entry,
2145                                                  void *context, int vl,
2146                                                  int mode, u64 data)
2147 {
2148         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2149
2150         return dd->cce_err_status_cnt[7];
2151 }
2152
2153 static u64 access_cce_cli0_async_fifo_parity_err_cnt(
2154                                 const struct cntr_entry *entry,
2155                                 void *context, int vl, int mode, u64 data)
2156 {
2157         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2158
2159         return dd->cce_err_status_cnt[6];
2160 }
2161
2162 static u64 access_cce_rspd_data_parity_err_cnt(const struct cntr_entry *entry,
2163                                                void *context, int vl, int mode,
2164                                                u64 data)
2165 {
2166         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2167
2168         return dd->cce_err_status_cnt[5];
2169 }
2170
2171 static u64 access_cce_trgt_access_err_cnt(const struct cntr_entry *entry,
2172                                           void *context, int vl, int mode,
2173                                           u64 data)
2174 {
2175         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2176
2177         return dd->cce_err_status_cnt[4];
2178 }
2179
2180 static u64 access_cce_trgt_async_fifo_parity_err_cnt(
2181                                 const struct cntr_entry *entry,
2182                                 void *context, int vl, int mode, u64 data)
2183 {
2184         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2185
2186         return dd->cce_err_status_cnt[3];
2187 }
2188
2189 static u64 access_cce_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2190                                                  void *context, int vl,
2191                                                  int mode, u64 data)
2192 {
2193         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2194
2195         return dd->cce_err_status_cnt[2];
2196 }
2197
2198 static u64 access_cce_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2199                                                 void *context, int vl,
2200                                                 int mode, u64 data)
2201 {
2202         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2203
2204         return dd->cce_err_status_cnt[1];
2205 }
2206
2207 static u64 access_ccs_csr_parity_err_cnt(const struct cntr_entry *entry,
2208                                          void *context, int vl, int mode,
2209                                          u64 data)
2210 {
2211         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2212
2213         return dd->cce_err_status_cnt[0];
2214 }
2215
2216 /*
2217  * Software counters corresponding to each of the
2218  * error status bits within RcvErrStatus
2219  */
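/*
 * Same layout again: rcv_err_status_cnt[] is indexed by bit position in
 * RcvErrStatus, starting here at bit 63.
 */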
2220 static u64 access_rx_csr_parity_err_cnt(const struct cntr_entry *entry,
2221                                         void *context, int vl, int mode,
2222                                         u64 data)
2223 {
2224         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2225
2226         return dd->rcv_err_status_cnt[63];
2227 }
2228
2229 static u64 access_rx_csr_write_bad_addr_err_cnt(const struct cntr_entry *entry,
2230                                                 void *context, int vl,
2231                                                 int mode, u64 data)
2232 {
2233         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2234
2235         return dd->rcv_err_status_cnt[62];
2236 }
2237
2238 static u64 access_rx_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
2239                                                void *context, int vl, int mode,
2240                                                u64 data)
2241 {
2242         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2243
2244         return dd->rcv_err_status_cnt[61];
2245 }
2246
2247 static u64 access_rx_dma_csr_unc_err_cnt(const struct cntr_entry *entry,
2248                                          void *context, int vl, int mode,
2249                                          u64 data)
2250 {
2251         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2252
2253         return dd->rcv_err_status_cnt[60];
2254 }
2255
2256 static u64 access_rx_dma_dq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2257                                                  void *context, int vl,
2258                                                  int mode, u64 data)
2259 {
2260         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2261
2262         return dd->rcv_err_status_cnt[59];
2263 }
2264
2265 static u64 access_rx_dma_eq_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2266                                                  void *context, int vl,
2267                                                  int mode, u64 data)
2268 {
2269         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2270
2271         return dd->rcv_err_status_cnt[58];
2272 }
2273
2274 static u64 access_rx_dma_csr_parity_err_cnt(const struct cntr_entry *entry,
2275                                             void *context, int vl, int mode,
2276                                             u64 data)
2277 {
2278         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2279
2280         return dd->rcv_err_status_cnt[57];
2281 }
2282
2283 static u64 access_rx_rbuf_data_cor_err_cnt(const struct cntr_entry *entry,
2284                                            void *context, int vl, int mode,
2285                                            u64 data)
2286 {
2287         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2288
2289         return dd->rcv_err_status_cnt[56];
2290 }
2291
2292 static u64 access_rx_rbuf_data_unc_err_cnt(const struct cntr_entry *entry,
2293                                            void *context, int vl, int mode,
2294                                            u64 data)
2295 {
2296         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2297
2298         return dd->rcv_err_status_cnt[55];
2299 }
2300
2301 static u64 access_rx_dma_data_fifo_rd_cor_err_cnt(
2302                                 const struct cntr_entry *entry,
2303                                 void *context, int vl, int mode, u64 data)
2304 {
2305         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2306
2307         return dd->rcv_err_status_cnt[54];
2308 }
2309
2310 static u64 access_rx_dma_data_fifo_rd_unc_err_cnt(
2311                                 const struct cntr_entry *entry,
2312                                 void *context, int vl, int mode, u64 data)
2313 {
2314         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2315
2316         return dd->rcv_err_status_cnt[53];
2317 }
2318
2319 static u64 access_rx_dma_hdr_fifo_rd_cor_err_cnt(const struct cntr_entry *entry,
2320                                                  void *context, int vl,
2321                                                  int mode, u64 data)
2322 {
2323         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2324
2325         return dd->rcv_err_status_cnt[52];
2326 }
2327
2328 static u64 access_rx_dma_hdr_fifo_rd_unc_err_cnt(const struct cntr_entry *entry,
2329                                                  void *context, int vl,
2330                                                  int mode, u64 data)
2331 {
2332         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2333
2334         return dd->rcv_err_status_cnt[51];
2335 }
2336
2337 static u64 access_rx_rbuf_desc_part2_cor_err_cnt(const struct cntr_entry *entry,
2338                                                  void *context, int vl,
2339                                                  int mode, u64 data)
2340 {
2341         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2342
2343         return dd->rcv_err_status_cnt[50];
2344 }
2345
2346 static u64 access_rx_rbuf_desc_part2_unc_err_cnt(const struct cntr_entry *entry,
2347                                                  void *context, int vl,
2348                                                  int mode, u64 data)
2349 {
2350         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2351
2352         return dd->rcv_err_status_cnt[49];
2353 }
2354
2355 static u64 access_rx_rbuf_desc_part1_cor_err_cnt(const struct cntr_entry *entry,
2356                                                  void *context, int vl,
2357                                                  int mode, u64 data)
2358 {
2359         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2360
2361         return dd->rcv_err_status_cnt[48];
2362 }
2363
2364 static u64 access_rx_rbuf_desc_part1_unc_err_cnt(const struct cntr_entry *entry,
2365                                                  void *context, int vl,
2366                                                  int mode, u64 data)
2367 {
2368         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2369
2370         return dd->rcv_err_status_cnt[47];
2371 }
2372
2373 static u64 access_rx_hq_intr_fsm_err_cnt(const struct cntr_entry *entry,
2374                                          void *context, int vl, int mode,
2375                                          u64 data)
2376 {
2377         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2378
2379         return dd->rcv_err_status_cnt[46];
2380 }
2381
2382 static u64 access_rx_hq_intr_csr_parity_err_cnt(
2383                                 const struct cntr_entry *entry,
2384                                 void *context, int vl, int mode, u64 data)
2385 {
2386         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2387
2388         return dd->rcv_err_status_cnt[45];
2389 }
2390
2391 static u64 access_rx_lookup_csr_parity_err_cnt(
2392                                 const struct cntr_entry *entry,
2393                                 void *context, int vl, int mode, u64 data)
2394 {
2395         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2396
2397         return dd->rcv_err_status_cnt[44];
2398 }
2399
2400 static u64 access_rx_lookup_rcv_array_cor_err_cnt(
2401                                 const struct cntr_entry *entry,
2402                                 void *context, int vl, int mode, u64 data)
2403 {
2404         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2405
2406         return dd->rcv_err_status_cnt[43];
2407 }
2408
2409 static u64 access_rx_lookup_rcv_array_unc_err_cnt(
2410                                 const struct cntr_entry *entry,
2411                                 void *context, int vl, int mode, u64 data)
2412 {
2413         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2414
2415         return dd->rcv_err_status_cnt[42];
2416 }
2417
2418 static u64 access_rx_lookup_des_part2_parity_err_cnt(
2419                                 const struct cntr_entry *entry,
2420                                 void *context, int vl, int mode, u64 data)
2421 {
2422         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2423
2424         return dd->rcv_err_status_cnt[41];
2425 }
2426
2427 static u64 access_rx_lookup_des_part1_unc_cor_err_cnt(
2428                                 const struct cntr_entry *entry,
2429                                 void *context, int vl, int mode, u64 data)
2430 {
2431         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2432
2433         return dd->rcv_err_status_cnt[40];
2434 }
2435
2436 static u64 access_rx_lookup_des_part1_unc_err_cnt(
2437                                 const struct cntr_entry *entry,
2438                                 void *context, int vl, int mode, u64 data)
2439 {
2440         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2441
2442         return dd->rcv_err_status_cnt[39];
2443 }
2444
2445 static u64 access_rx_rbuf_next_free_buf_cor_err_cnt(
2446                                 const struct cntr_entry *entry,
2447                                 void *context, int vl, int mode, u64 data)
2448 {
2449         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2450
2451         return dd->rcv_err_status_cnt[38];
2452 }
2453
2454 static u64 access_rx_rbuf_next_free_buf_unc_err_cnt(
2455                                 const struct cntr_entry *entry,
2456                                 void *context, int vl, int mode, u64 data)
2457 {
2458         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2459
2460         return dd->rcv_err_status_cnt[37];
2461 }
2462
2463 static u64 access_rbuf_fl_init_wr_addr_parity_err_cnt(
2464                                 const struct cntr_entry *entry,
2465                                 void *context, int vl, int mode, u64 data)
2466 {
2467         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2468
2469         return dd->rcv_err_status_cnt[36];
2470 }
2471
2472 static u64 access_rx_rbuf_fl_initdone_parity_err_cnt(
2473                                 const struct cntr_entry *entry,
2474                                 void *context, int vl, int mode, u64 data)
2475 {
2476         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2477
2478         return dd->rcv_err_status_cnt[35];
2479 }
2480
2481 static u64 access_rx_rbuf_fl_write_addr_parity_err_cnt(
2482                                 const struct cntr_entry *entry,
2483                                 void *context, int vl, int mode, u64 data)
2484 {
2485         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2486
2487         return dd->rcv_err_status_cnt[34];
2488 }
2489
2490 static u64 access_rx_rbuf_fl_rd_addr_parity_err_cnt(
2491                                 const struct cntr_entry *entry,
2492                                 void *context, int vl, int mode, u64 data)
2493 {
2494         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2495
2496         return dd->rcv_err_status_cnt[33];
2497 }
2498
2499 static u64 access_rx_rbuf_empty_err_cnt(const struct cntr_entry *entry,
2500                                         void *context, int vl, int mode,
2501                                         u64 data)
2502 {
2503         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2504
2505         return dd->rcv_err_status_cnt[32];
2506 }
2507
2508 static u64 access_rx_rbuf_full_err_cnt(const struct cntr_entry *entry,
2509                                        void *context, int vl, int mode,
2510                                        u64 data)
2511 {
2512         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2513
2514         return dd->rcv_err_status_cnt[31];
2515 }
2516
2517 static u64 access_rbuf_bad_lookup_err_cnt(const struct cntr_entry *entry,
2518                                           void *context, int vl, int mode,
2519                                           u64 data)
2520 {
2521         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2522
2523         return dd->rcv_err_status_cnt[30];
2524 }
2525
2526 static u64 access_rbuf_ctx_id_parity_err_cnt(const struct cntr_entry *entry,
2527                                              void *context, int vl, int mode,
2528                                              u64 data)
2529 {
2530         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2531
2532         return dd->rcv_err_status_cnt[29];
2533 }
2534
2535 static u64 access_rbuf_csr_qeopdw_parity_err_cnt(const struct cntr_entry *entry,
2536                                                  void *context, int vl,
2537                                                  int mode, u64 data)
2538 {
2539         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2540
2541         return dd->rcv_err_status_cnt[28];
2542 }
2543
2544 static u64 access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt(
2545                                 const struct cntr_entry *entry,
2546                                 void *context, int vl, int mode, u64 data)
2547 {
2548         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2549
2550         return dd->rcv_err_status_cnt[27];
2551 }
2552
2553 static u64 access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt(
2554                                 const struct cntr_entry *entry,
2555                                 void *context, int vl, int mode, u64 data)
2556 {
2557         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2558
2559         return dd->rcv_err_status_cnt[26];
2560 }
2561
2562 static u64 access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt(
2563                                 const struct cntr_entry *entry,
2564                                 void *context, int vl, int mode, u64 data)
2565 {
2566         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2567
2568         return dd->rcv_err_status_cnt[25];
2569 }
2570
2571 static u64 access_rx_rbuf_csr_q_vld_bit_parity_err_cnt(
2572                                 const struct cntr_entry *entry,
2573                                 void *context, int vl, int mode, u64 data)
2574 {
2575         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2576
2577         return dd->rcv_err_status_cnt[24];
2578 }
2579
2580 static u64 access_rx_rbuf_csr_q_next_buf_parity_err_cnt(
2581                                 const struct cntr_entry *entry,
2582                                 void *context, int vl, int mode, u64 data)
2583 {
2584         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2585
2586         return dd->rcv_err_status_cnt[23];
2587 }
2588
2589 static u64 access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt(
2590                                 const struct cntr_entry *entry,
2591                                 void *context, int vl, int mode, u64 data)
2592 {
2593         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2594
2595         return dd->rcv_err_status_cnt[22];
2596 }
2597
2598 static u64 access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt(
2599                                 const struct cntr_entry *entry,
2600                                 void *context, int vl, int mode, u64 data)
2601 {
2602         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2603
2604         return dd->rcv_err_status_cnt[21];
2605 }
2606
2607 static u64 access_rx_rbuf_block_list_read_cor_err_cnt(
2608                                 const struct cntr_entry *entry,
2609                                 void *context, int vl, int mode, u64 data)
2610 {
2611         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2612
2613         return dd->rcv_err_status_cnt[20];
2614 }
2615
2616 static u64 access_rx_rbuf_block_list_read_unc_err_cnt(
2617                                 const struct cntr_entry *entry,
2618                                 void *context, int vl, int mode, u64 data)
2619 {
2620         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2621
2622         return dd->rcv_err_status_cnt[19];
2623 }
2624
2625 static u64 access_rx_rbuf_lookup_des_cor_err_cnt(const struct cntr_entry *entry,
2626                                                  void *context, int vl,
2627                                                  int mode, u64 data)
2628 {
2629         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2630
2631         return dd->rcv_err_status_cnt[18];
2632 }
2633
2634 static u64 access_rx_rbuf_lookup_des_unc_err_cnt(const struct cntr_entry *entry,
2635                                                  void *context, int vl,
2636                                                  int mode, u64 data)
2637 {
2638         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2639
2640         return dd->rcv_err_status_cnt[17];
2641 }
2642
2643 static u64 access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt(
2644                                 const struct cntr_entry *entry,
2645                                 void *context, int vl, int mode, u64 data)
2646 {
2647         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2648
2649         return dd->rcv_err_status_cnt[16];
2650 }
2651
2652 static u64 access_rx_rbuf_lookup_des_reg_unc_err_cnt(
2653                                 const struct cntr_entry *entry,
2654                                 void *context, int vl, int mode, u64 data)
2655 {
2656         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2657
2658         return dd->rcv_err_status_cnt[15];
2659 }
2660
2661 static u64 access_rx_rbuf_free_list_cor_err_cnt(const struct cntr_entry *entry,
2662                                                 void *context, int vl,
2663                                                 int mode, u64 data)
2664 {
2665         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2666
2667         return dd->rcv_err_status_cnt[14];
2668 }
2669
2670 static u64 access_rx_rbuf_free_list_unc_err_cnt(const struct cntr_entry *entry,
2671                                                 void *context, int vl,
2672                                                 int mode, u64 data)
2673 {
2674         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2675
2676         return dd->rcv_err_status_cnt[13];
2677 }
2678
2679 static u64 access_rx_rcv_fsm_encoding_err_cnt(const struct cntr_entry *entry,
2680                                               void *context, int vl, int mode,
2681                                               u64 data)
2682 {
2683         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2684
2685         return dd->rcv_err_status_cnt[12];
2686 }
2687
2688 static u64 access_rx_dma_flag_cor_err_cnt(const struct cntr_entry *entry,
2689                                           void *context, int vl, int mode,
2690                                           u64 data)
2691 {
2692         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2693
2694         return dd->rcv_err_status_cnt[11];
2695 }
2696
2697 static u64 access_rx_dma_flag_unc_err_cnt(const struct cntr_entry *entry,
2698                                           void *context, int vl, int mode,
2699                                           u64 data)
2700 {
2701         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2702
2703         return dd->rcv_err_status_cnt[10];
2704 }
2705
2706 static u64 access_rx_dc_sop_eop_parity_err_cnt(const struct cntr_entry *entry,
2707                                                void *context, int vl, int mode,
2708                                                u64 data)
2709 {
2710         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2711
2712         return dd->rcv_err_status_cnt[9];
2713 }
2714
2715 static u64 access_rx_rcv_csr_parity_err_cnt(const struct cntr_entry *entry,
2716                                             void *context, int vl, int mode,
2717                                             u64 data)
2718 {
2719         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2720
2721         return dd->rcv_err_status_cnt[8];
2722 }
2723
2724 static u64 access_rx_rcv_qp_map_table_cor_err_cnt(
2725                                 const struct cntr_entry *entry,
2726                                 void *context, int vl, int mode, u64 data)
2727 {
2728         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2729
2730         return dd->rcv_err_status_cnt[7];
2731 }
2732
2733 static u64 access_rx_rcv_qp_map_table_unc_err_cnt(
2734                                 const struct cntr_entry *entry,
2735                                 void *context, int vl, int mode, u64 data)
2736 {
2737         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2738
2739         return dd->rcv_err_status_cnt[6];
2740 }
2741
2742 static u64 access_rx_rcv_data_cor_err_cnt(const struct cntr_entry *entry,
2743                                           void *context, int vl, int mode,
2744                                           u64 data)
2745 {
2746         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2747
2748         return dd->rcv_err_status_cnt[5];
2749 }
2750
2751 static u64 access_rx_rcv_data_unc_err_cnt(const struct cntr_entry *entry,
2752                                           void *context, int vl, int mode,
2753                                           u64 data)
2754 {
2755         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2756
2757         return dd->rcv_err_status_cnt[4];
2758 }
2759
2760 static u64 access_rx_rcv_hdr_cor_err_cnt(const struct cntr_entry *entry,
2761                                          void *context, int vl, int mode,
2762                                          u64 data)
2763 {
2764         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2765
2766         return dd->rcv_err_status_cnt[3];
2767 }
2768
2769 static u64 access_rx_rcv_hdr_unc_err_cnt(const struct cntr_entry *entry,
2770                                          void *context, int vl, int mode,
2771                                          u64 data)
2772 {
2773         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2774
2775         return dd->rcv_err_status_cnt[2];
2776 }
2777
2778 static u64 access_rx_dc_intf_parity_err_cnt(const struct cntr_entry *entry,
2779                                             void *context, int vl, int mode,
2780                                             u64 data)
2781 {
2782         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2783
2784         return dd->rcv_err_status_cnt[1];
2785 }
2786
2787 static u64 access_rx_dma_csr_cor_err_cnt(const struct cntr_entry *entry,
2788                                          void *context, int vl, int mode,
2789                                          u64 data)
2790 {
2791         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2792
2793         return dd->rcv_err_status_cnt[0];
2794 }
2795
2796 /*
2797  * Software counters corresponding to each of the
2798  * error status bits within SendPioErrStatus
2799  */
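/*
 * Note: the index into send_pio_err_status_cnt[] used by each accessor
 * below matches the bit position of the corresponding error in the
 * SendPioErrStatus CSR (bit 35 down to bit 0), so the accessor name,
 * CSR bit, and software shadow counter stay in one-to-one correspondence.
 * The shadow counts themselves are presumably incremented by the
 * SendPioErrStatus interrupt handling elsewhere in this file.
 */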
2800 static u64 access_pio_pec_sop_head_parity_err_cnt(
2801                                 const struct cntr_entry *entry,
2802                                 void *context, int vl, int mode, u64 data)
2803 {
2804         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2805
2806         return dd->send_pio_err_status_cnt[35];
2807 }
2808
2809 static u64 access_pio_pcc_sop_head_parity_err_cnt(
2810                                 const struct cntr_entry *entry,
2811                                 void *context, int vl, int mode, u64 data)
2812 {
2813         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2814
2815         return dd->send_pio_err_status_cnt[34];
2816 }
2817
2818 static u64 access_pio_last_returned_cnt_parity_err_cnt(
2819                                 const struct cntr_entry *entry,
2820                                 void *context, int vl, int mode, u64 data)
2821 {
2822         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2823
2824         return dd->send_pio_err_status_cnt[33];
2825 }
2826
2827 static u64 access_pio_current_free_cnt_parity_err_cnt(
2828                                 const struct cntr_entry *entry,
2829                                 void *context, int vl, int mode, u64 data)
2830 {
2831         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2832
2833         return dd->send_pio_err_status_cnt[32];
2834 }
2835
2836 static u64 access_pio_reserved_31_err_cnt(const struct cntr_entry *entry,
2837                                           void *context, int vl, int mode,
2838                                           u64 data)
2839 {
2840         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2841
2842         return dd->send_pio_err_status_cnt[31];
2843 }
2844
2845 static u64 access_pio_reserved_30_err_cnt(const struct cntr_entry *entry,
2846                                           void *context, int vl, int mode,
2847                                           u64 data)
2848 {
2849         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2850
2851         return dd->send_pio_err_status_cnt[30];
2852 }
2853
2854 static u64 access_pio_ppmc_sop_len_err_cnt(const struct cntr_entry *entry,
2855                                            void *context, int vl, int mode,
2856                                            u64 data)
2857 {
2858         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2859
2860         return dd->send_pio_err_status_cnt[29];
2861 }
2862
2863 static u64 access_pio_ppmc_bqc_mem_parity_err_cnt(
2864                                 const struct cntr_entry *entry,
2865                                 void *context, int vl, int mode, u64 data)
2866 {
2867         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2868
2869         return dd->send_pio_err_status_cnt[28];
2870 }
2871
2872 static u64 access_pio_vl_fifo_parity_err_cnt(const struct cntr_entry *entry,
2873                                              void *context, int vl, int mode,
2874                                              u64 data)
2875 {
2876         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2877
2878         return dd->send_pio_err_status_cnt[27];
2879 }
2880
2881 static u64 access_pio_vlf_sop_parity_err_cnt(const struct cntr_entry *entry,
2882                                              void *context, int vl, int mode,
2883                                              u64 data)
2884 {
2885         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2886
2887         return dd->send_pio_err_status_cnt[26];
2888 }
2889
2890 static u64 access_pio_vlf_v1_len_parity_err_cnt(const struct cntr_entry *entry,
2891                                                 void *context, int vl,
2892                                                 int mode, u64 data)
2893 {
2894         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2895
2896         return dd->send_pio_err_status_cnt[25];
2897 }
2898
2899 static u64 access_pio_block_qw_count_parity_err_cnt(
2900                                 const struct cntr_entry *entry,
2901                                 void *context, int vl, int mode, u64 data)
2902 {
2903         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2904
2905         return dd->send_pio_err_status_cnt[24];
2906 }
2907
2908 static u64 access_pio_write_qw_valid_parity_err_cnt(
2909                                 const struct cntr_entry *entry,
2910                                 void *context, int vl, int mode, u64 data)
2911 {
2912         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2913
2914         return dd->send_pio_err_status_cnt[23];
2915 }
2916
2917 static u64 access_pio_state_machine_err_cnt(const struct cntr_entry *entry,
2918                                             void *context, int vl, int mode,
2919                                             u64 data)
2920 {
2921         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2922
2923         return dd->send_pio_err_status_cnt[22];
2924 }
2925
2926 static u64 access_pio_write_data_parity_err_cnt(const struct cntr_entry *entry,
2927                                                 void *context, int vl,
2928                                                 int mode, u64 data)
2929 {
2930         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2931
2932         return dd->send_pio_err_status_cnt[21];
2933 }
2934
2935 static u64 access_pio_host_addr_mem_cor_err_cnt(const struct cntr_entry *entry,
2936                                                 void *context, int vl,
2937                                                 int mode, u64 data)
2938 {
2939         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2940
2941         return dd->send_pio_err_status_cnt[20];
2942 }
2943
2944 static u64 access_pio_host_addr_mem_unc_err_cnt(const struct cntr_entry *entry,
2945                                                 void *context, int vl,
2946                                                 int mode, u64 data)
2947 {
2948         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2949
2950         return dd->send_pio_err_status_cnt[19];
2951 }
2952
2953 static u64 access_pio_pkt_evict_sm_or_arb_sm_err_cnt(
2954                                 const struct cntr_entry *entry,
2955                                 void *context, int vl, int mode, u64 data)
2956 {
2957         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2958
2959         return dd->send_pio_err_status_cnt[18];
2960 }
2961
2962 static u64 access_pio_init_sm_in_err_cnt(const struct cntr_entry *entry,
2963                                          void *context, int vl, int mode,
2964                                          u64 data)
2965 {
2966         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2967
2968         return dd->send_pio_err_status_cnt[17];
2969 }
2970
2971 static u64 access_pio_ppmc_pbl_fifo_err_cnt(const struct cntr_entry *entry,
2972                                             void *context, int vl, int mode,
2973                                             u64 data)
2974 {
2975         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2976
2977         return dd->send_pio_err_status_cnt[16];
2978 }
2979
2980 static u64 access_pio_credit_ret_fifo_parity_err_cnt(
2981                                 const struct cntr_entry *entry,
2982                                 void *context, int vl, int mode, u64 data)
2983 {
2984         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2985
2986         return dd->send_pio_err_status_cnt[15];
2987 }
2988
2989 static u64 access_pio_v1_len_mem_bank1_cor_err_cnt(
2990                                 const struct cntr_entry *entry,
2991                                 void *context, int vl, int mode, u64 data)
2992 {
2993         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
2994
2995         return dd->send_pio_err_status_cnt[14];
2996 }
2997
2998 static u64 access_pio_v1_len_mem_bank0_cor_err_cnt(
2999                                 const struct cntr_entry *entry,
3000                                 void *context, int vl, int mode, u64 data)
3001 {
3002         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3003
3004         return dd->send_pio_err_status_cnt[13];
3005 }
3006
3007 static u64 access_pio_v1_len_mem_bank1_unc_err_cnt(
3008                                 const struct cntr_entry *entry,
3009                                 void *context, int vl, int mode, u64 data)
3010 {
3011         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3012
3013         return dd->send_pio_err_status_cnt[12];
3014 }
3015
3016 static u64 access_pio_v1_len_mem_bank0_unc_err_cnt(
3017                                 const struct cntr_entry *entry,
3018                                 void *context, int vl, int mode, u64 data)
3019 {
3020         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3021
3022         return dd->send_pio_err_status_cnt[11];
3023 }
3024
3025 static u64 access_pio_sm_pkt_reset_parity_err_cnt(
3026                                 const struct cntr_entry *entry,
3027                                 void *context, int vl, int mode, u64 data)
3028 {
3029         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3030
3031         return dd->send_pio_err_status_cnt[10];
3032 }
3033
3034 static u64 access_pio_pkt_evict_fifo_parity_err_cnt(
3035                                 const struct cntr_entry *entry,
3036                                 void *context, int vl, int mode, u64 data)
3037 {
3038         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3039
3040         return dd->send_pio_err_status_cnt[9];
3041 }
3042
3043 static u64 access_pio_sbrdctrl_crrel_fifo_parity_err_cnt(
3044                                 const struct cntr_entry *entry,
3045                                 void *context, int vl, int mode, u64 data)
3046 {
3047         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3048
3049         return dd->send_pio_err_status_cnt[8];
3050 }
3051
3052 static u64 access_pio_sbrdctl_crrel_parity_err_cnt(
3053                                 const struct cntr_entry *entry,
3054                                 void *context, int vl, int mode, u64 data)
3055 {
3056         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3057
3058         return dd->send_pio_err_status_cnt[7];
3059 }
3060
3061 static u64 access_pio_pec_fifo_parity_err_cnt(const struct cntr_entry *entry,
3062                                               void *context, int vl, int mode,
3063                                               u64 data)
3064 {
3065         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3066
3067         return dd->send_pio_err_status_cnt[6];
3068 }
3069
3070 static u64 access_pio_pcc_fifo_parity_err_cnt(const struct cntr_entry *entry,
3071                                               void *context, int vl, int mode,
3072                                               u64 data)
3073 {
3074         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3075
3076         return dd->send_pio_err_status_cnt[5];
3077 }
3078
3079 static u64 access_pio_sb_mem_fifo1_err_cnt(const struct cntr_entry *entry,
3080                                            void *context, int vl, int mode,
3081                                            u64 data)
3082 {
3083         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3084
3085         return dd->send_pio_err_status_cnt[4];
3086 }
3087
3088 static u64 access_pio_sb_mem_fifo0_err_cnt(const struct cntr_entry *entry,
3089                                            void *context, int vl, int mode,
3090                                            u64 data)
3091 {
3092         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3093
3094         return dd->send_pio_err_status_cnt[3];
3095 }
3096
3097 static u64 access_pio_csr_parity_err_cnt(const struct cntr_entry *entry,
3098                                          void *context, int vl, int mode,
3099                                          u64 data)
3100 {
3101         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3102
3103         return dd->send_pio_err_status_cnt[2];
3104 }
3105
3106 static u64 access_pio_write_addr_parity_err_cnt(const struct cntr_entry *entry,
3107                                                 void *context, int vl,
3108                                                 int mode, u64 data)
3109 {
3110         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3111
3112         return dd->send_pio_err_status_cnt[1];
3113 }
3114
3115 static u64 access_pio_write_bad_ctxt_err_cnt(const struct cntr_entry *entry,
3116                                              void *context, int vl, int mode,
3117                                              u64 data)
3118 {
3119         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3120
3121         return dd->send_pio_err_status_cnt[0];
3122 }
3123
3124 /*
3125  * Software counters corresponding to each of the
3126  * error status bits within SendDmaErrStatus
3127  */
3128 static u64 access_sdma_pcie_req_tracking_cor_err_cnt(
3129                                 const struct cntr_entry *entry,
3130                                 void *context, int vl, int mode, u64 data)
3131 {
3132         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3133
3134         return dd->send_dma_err_status_cnt[3];
3135 }
3136
3137 static u64 access_sdma_pcie_req_tracking_unc_err_cnt(
3138                                 const struct cntr_entry *entry,
3139                                 void *context, int vl, int mode, u64 data)
3140 {
3141         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3142
3143         return dd->send_dma_err_status_cnt[2];
3144 }
3145
3146 static u64 access_sdma_csr_parity_err_cnt(const struct cntr_entry *entry,
3147                                           void *context, int vl, int mode,
3148                                           u64 data)
3149 {
3150         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3151
3152         return dd->send_dma_err_status_cnt[1];
3153 }
3154
3155 static u64 access_sdma_rpy_tag_err_cnt(const struct cntr_entry *entry,
3156                                        void *context, int vl, int mode,
3157                                        u64 data)
3158 {
3159         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3160
3161         return dd->send_dma_err_status_cnt[0];
3162 }
3163
3164 /*
3165  * Software counters corresponding to each of the
3166  * error status bits within SendEgressErrStatus
3167  */
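/*
 * As above, each accessor returns the software shadow count whose array
 * index matches the bit position in the SendEgressErrStatus CSR.  The
 * reserved bits (10, 9, 6 and 2) also get accessors, keeping the mapping
 * onto the 64-bit register layout complete.
 */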
3168 static u64 access_tx_read_pio_memory_csr_unc_err_cnt(
3169                                 const struct cntr_entry *entry,
3170                                 void *context, int vl, int mode, u64 data)
3171 {
3172         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3173
3174         return dd->send_egress_err_status_cnt[63];
3175 }
3176
3177 static u64 access_tx_read_sdma_memory_csr_err_cnt(
3178                                 const struct cntr_entry *entry,
3179                                 void *context, int vl, int mode, u64 data)
3180 {
3181         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3182
3183         return dd->send_egress_err_status_cnt[62];
3184 }
3185
3186 static u64 access_tx_egress_fifo_cor_err_cnt(const struct cntr_entry *entry,
3187                                              void *context, int vl, int mode,
3188                                              u64 data)
3189 {
3190         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3191
3192         return dd->send_egress_err_status_cnt[61];
3193 }
3194
3195 static u64 access_tx_read_pio_memory_cor_err_cnt(const struct cntr_entry *entry,
3196                                                  void *context, int vl,
3197                                                  int mode, u64 data)
3198 {
3199         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3200
3201         return dd->send_egress_err_status_cnt[60];
3202 }
3203
3204 static u64 access_tx_read_sdma_memory_cor_err_cnt(
3205                                 const struct cntr_entry *entry,
3206                                 void *context, int vl, int mode, u64 data)
3207 {
3208         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3209
3210         return dd->send_egress_err_status_cnt[59];
3211 }
3212
3213 static u64 access_tx_sb_hdr_cor_err_cnt(const struct cntr_entry *entry,
3214                                         void *context, int vl, int mode,
3215                                         u64 data)
3216 {
3217         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3218
3219         return dd->send_egress_err_status_cnt[58];
3220 }
3221
3222 static u64 access_tx_credit_overrun_err_cnt(const struct cntr_entry *entry,
3223                                             void *context, int vl, int mode,
3224                                             u64 data)
3225 {
3226         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3227
3228         return dd->send_egress_err_status_cnt[57];
3229 }
3230
3231 static u64 access_tx_launch_fifo8_cor_err_cnt(const struct cntr_entry *entry,
3232                                               void *context, int vl, int mode,
3233                                               u64 data)
3234 {
3235         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3236
3237         return dd->send_egress_err_status_cnt[56];
3238 }
3239
3240 static u64 access_tx_launch_fifo7_cor_err_cnt(const struct cntr_entry *entry,
3241                                               void *context, int vl, int mode,
3242                                               u64 data)
3243 {
3244         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3245
3246         return dd->send_egress_err_status_cnt[55];
3247 }
3248
3249 static u64 access_tx_launch_fifo6_cor_err_cnt(const struct cntr_entry *entry,
3250                                               void *context, int vl, int mode,
3251                                               u64 data)
3252 {
3253         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3254
3255         return dd->send_egress_err_status_cnt[54];
3256 }
3257
3258 static u64 access_tx_launch_fifo5_cor_err_cnt(const struct cntr_entry *entry,
3259                                               void *context, int vl, int mode,
3260                                               u64 data)
3261 {
3262         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3263
3264         return dd->send_egress_err_status_cnt[53];
3265 }
3266
3267 static u64 access_tx_launch_fifo4_cor_err_cnt(const struct cntr_entry *entry,
3268                                               void *context, int vl, int mode,
3269                                               u64 data)
3270 {
3271         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3272
3273         return dd->send_egress_err_status_cnt[52];
3274 }
3275
3276 static u64 access_tx_launch_fifo3_cor_err_cnt(const struct cntr_entry *entry,
3277                                               void *context, int vl, int mode,
3278                                               u64 data)
3279 {
3280         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3281
3282         return dd->send_egress_err_status_cnt[51];
3283 }
3284
3285 static u64 access_tx_launch_fifo2_cor_err_cnt(const struct cntr_entry *entry,
3286                                               void *context, int vl, int mode,
3287                                               u64 data)
3288 {
3289         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3290
3291         return dd->send_egress_err_status_cnt[50];
3292 }
3293
3294 static u64 access_tx_launch_fifo1_cor_err_cnt(const struct cntr_entry *entry,
3295                                               void *context, int vl, int mode,
3296                                               u64 data)
3297 {
3298         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3299
3300         return dd->send_egress_err_status_cnt[49];
3301 }
3302
3303 static u64 access_tx_launch_fifo0_cor_err_cnt(const struct cntr_entry *entry,
3304                                               void *context, int vl, int mode,
3305                                               u64 data)
3306 {
3307         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3308
3309         return dd->send_egress_err_status_cnt[48];
3310 }
3311
3312 static u64 access_tx_credit_return_vl_err_cnt(const struct cntr_entry *entry,
3313                                               void *context, int vl, int mode,
3314                                               u64 data)
3315 {
3316         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3317
3318         return dd->send_egress_err_status_cnt[47];
3319 }
3320
3321 static u64 access_tx_hcrc_insertion_err_cnt(const struct cntr_entry *entry,
3322                                             void *context, int vl, int mode,
3323                                             u64 data)
3324 {
3325         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3326
3327         return dd->send_egress_err_status_cnt[46];
3328 }
3329
3330 static u64 access_tx_egress_fifo_unc_err_cnt(const struct cntr_entry *entry,
3331                                              void *context, int vl, int mode,
3332                                              u64 data)
3333 {
3334         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3335
3336         return dd->send_egress_err_status_cnt[45];
3337 }
3338
3339 static u64 access_tx_read_pio_memory_unc_err_cnt(const struct cntr_entry *entry,
3340                                                  void *context, int vl,
3341                                                  int mode, u64 data)
3342 {
3343         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3344
3345         return dd->send_egress_err_status_cnt[44];
3346 }
3347
3348 static u64 access_tx_read_sdma_memory_unc_err_cnt(
3349                                 const struct cntr_entry *entry,
3350                                 void *context, int vl, int mode, u64 data)
3351 {
3352         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3353
3354         return dd->send_egress_err_status_cnt[43];
3355 }
3356
3357 static u64 access_tx_sb_hdr_unc_err_cnt(const struct cntr_entry *entry,
3358                                         void *context, int vl, int mode,
3359                                         u64 data)
3360 {
3361         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3362
3363         return dd->send_egress_err_status_cnt[42];
3364 }
3365
3366 static u64 access_tx_credit_return_partiy_err_cnt(
3367                                 const struct cntr_entry *entry,
3368                                 void *context, int vl, int mode, u64 data)
3369 {
3370         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3371
3372         return dd->send_egress_err_status_cnt[41];
3373 }
3374
3375 static u64 access_tx_launch_fifo8_unc_or_parity_err_cnt(
3376                                 const struct cntr_entry *entry,
3377                                 void *context, int vl, int mode, u64 data)
3378 {
3379         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3380
3381         return dd->send_egress_err_status_cnt[40];
3382 }
3383
3384 static u64 access_tx_launch_fifo7_unc_or_parity_err_cnt(
3385                                 const struct cntr_entry *entry,
3386                                 void *context, int vl, int mode, u64 data)
3387 {
3388         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3389
3390         return dd->send_egress_err_status_cnt[39];
3391 }
3392
3393 static u64 access_tx_launch_fifo6_unc_or_parity_err_cnt(
3394                                 const struct cntr_entry *entry,
3395                                 void *context, int vl, int mode, u64 data)
3396 {
3397         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3398
3399         return dd->send_egress_err_status_cnt[38];
3400 }
3401
3402 static u64 access_tx_launch_fifo5_unc_or_parity_err_cnt(
3403                                 const struct cntr_entry *entry,
3404                                 void *context, int vl, int mode, u64 data)
3405 {
3406         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3407
3408         return dd->send_egress_err_status_cnt[37];
3409 }
3410
3411 static u64 access_tx_launch_fifo4_unc_or_parity_err_cnt(
3412                                 const struct cntr_entry *entry,
3413                                 void *context, int vl, int mode, u64 data)
3414 {
3415         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3416
3417         return dd->send_egress_err_status_cnt[36];
3418 }
3419
3420 static u64 access_tx_launch_fifo3_unc_or_parity_err_cnt(
3421                                 const struct cntr_entry *entry,
3422                                 void *context, int vl, int mode, u64 data)
3423 {
3424         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3425
3426         return dd->send_egress_err_status_cnt[35];
3427 }
3428
3429 static u64 access_tx_launch_fifo2_unc_or_parity_err_cnt(
3430                                 const struct cntr_entry *entry,
3431                                 void *context, int vl, int mode, u64 data)
3432 {
3433         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3434
3435         return dd->send_egress_err_status_cnt[34];
3436 }
3437
3438 static u64 access_tx_launch_fifo1_unc_or_parity_err_cnt(
3439                                 const struct cntr_entry *entry,
3440                                 void *context, int vl, int mode, u64 data)
3441 {
3442         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3443
3444         return dd->send_egress_err_status_cnt[33];
3445 }
3446
3447 static u64 access_tx_launch_fifo0_unc_or_parity_err_cnt(
3448                                 const struct cntr_entry *entry,
3449                                 void *context, int vl, int mode, u64 data)
3450 {
3451         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3452
3453         return dd->send_egress_err_status_cnt[32];
3454 }
3455
3456 static u64 access_tx_sdma15_disallowed_packet_err_cnt(
3457                                 const struct cntr_entry *entry,
3458                                 void *context, int vl, int mode, u64 data)
3459 {
3460         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3461
3462         return dd->send_egress_err_status_cnt[31];
3463 }
3464
3465 static u64 access_tx_sdma14_disallowed_packet_err_cnt(
3466                                 const struct cntr_entry *entry,
3467                                 void *context, int vl, int mode, u64 data)
3468 {
3469         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3470
3471         return dd->send_egress_err_status_cnt[30];
3472 }
3473
3474 static u64 access_tx_sdma13_disallowed_packet_err_cnt(
3475                                 const struct cntr_entry *entry,
3476                                 void *context, int vl, int mode, u64 data)
3477 {
3478         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3479
3480         return dd->send_egress_err_status_cnt[29];
3481 }
3482
3483 static u64 access_tx_sdma12_disallowed_packet_err_cnt(
3484                                 const struct cntr_entry *entry,
3485                                 void *context, int vl, int mode, u64 data)
3486 {
3487         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3488
3489         return dd->send_egress_err_status_cnt[28];
3490 }
3491
3492 static u64 access_tx_sdma11_disallowed_packet_err_cnt(
3493                                 const struct cntr_entry *entry,
3494                                 void *context, int vl, int mode, u64 data)
3495 {
3496         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3497
3498         return dd->send_egress_err_status_cnt[27];
3499 }
3500
3501 static u64 access_tx_sdma10_disallowed_packet_err_cnt(
3502                                 const struct cntr_entry *entry,
3503                                 void *context, int vl, int mode, u64 data)
3504 {
3505         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3506
3507         return dd->send_egress_err_status_cnt[26];
3508 }
3509
3510 static u64 access_tx_sdma9_disallowed_packet_err_cnt(
3511                                 const struct cntr_entry *entry,
3512                                 void *context, int vl, int mode, u64 data)
3513 {
3514         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3515
3516         return dd->send_egress_err_status_cnt[25];
3517 }
3518
3519 static u64 access_tx_sdma8_disallowed_packet_err_cnt(
3520                                 const struct cntr_entry *entry,
3521                                 void *context, int vl, int mode, u64 data)
3522 {
3523         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3524
3525         return dd->send_egress_err_status_cnt[24];
3526 }
3527
3528 static u64 access_tx_sdma7_disallowed_packet_err_cnt(
3529                                 const struct cntr_entry *entry,
3530                                 void *context, int vl, int mode, u64 data)
3531 {
3532         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3533
3534         return dd->send_egress_err_status_cnt[23];
3535 }
3536
3537 static u64 access_tx_sdma6_disallowed_packet_err_cnt(
3538                                 const struct cntr_entry *entry,
3539                                 void *context, int vl, int mode, u64 data)
3540 {
3541         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3542
3543         return dd->send_egress_err_status_cnt[22];
3544 }
3545
3546 static u64 access_tx_sdma5_disallowed_packet_err_cnt(
3547                                 const struct cntr_entry *entry,
3548                                 void *context, int vl, int mode, u64 data)
3549 {
3550         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3551
3552         return dd->send_egress_err_status_cnt[21];
3553 }
3554
3555 static u64 access_tx_sdma4_disallowed_packet_err_cnt(
3556                                 const struct cntr_entry *entry,
3557                                 void *context, int vl, int mode, u64 data)
3558 {
3559         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3560
3561         return dd->send_egress_err_status_cnt[20];
3562 }
3563
3564 static u64 access_tx_sdma3_disallowed_packet_err_cnt(
3565                                 const struct cntr_entry *entry,
3566                                 void *context, int vl, int mode, u64 data)
3567 {
3568         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3569
3570         return dd->send_egress_err_status_cnt[19];
3571 }
3572
3573 static u64 access_tx_sdma2_disallowed_packet_err_cnt(
3574                                 const struct cntr_entry *entry,
3575                                 void *context, int vl, int mode, u64 data)
3576 {
3577         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3578
3579         return dd->send_egress_err_status_cnt[18];
3580 }
3581
3582 static u64 access_tx_sdma1_disallowed_packet_err_cnt(
3583                                 const struct cntr_entry *entry,
3584                                 void *context, int vl, int mode, u64 data)
3585 {
3586         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3587
3588         return dd->send_egress_err_status_cnt[17];
3589 }
3590
3591 static u64 access_tx_sdma0_disallowed_packet_err_cnt(
3592                                 const struct cntr_entry *entry,
3593                                 void *context, int vl, int mode, u64 data)
3594 {
3595         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3596
3597         return dd->send_egress_err_status_cnt[16];
3598 }
3599
3600 static u64 access_tx_config_parity_err_cnt(const struct cntr_entry *entry,
3601                                            void *context, int vl, int mode,
3602                                            u64 data)
3603 {
3604         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3605
3606         return dd->send_egress_err_status_cnt[15];
3607 }
3608
3609 static u64 access_tx_sbrd_ctl_csr_parity_err_cnt(const struct cntr_entry *entry,
3610                                                  void *context, int vl,
3611                                                  int mode, u64 data)
3612 {
3613         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3614
3615         return dd->send_egress_err_status_cnt[14];
3616 }
3617
3618 static u64 access_tx_launch_csr_parity_err_cnt(const struct cntr_entry *entry,
3619                                                void *context, int vl, int mode,
3620                                                u64 data)
3621 {
3622         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3623
3624         return dd->send_egress_err_status_cnt[13];
3625 }
3626
3627 static u64 access_tx_illegal_vl_err_cnt(const struct cntr_entry *entry,
3628                                         void *context, int vl, int mode,
3629                                         u64 data)
3630 {
3631         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3632
3633         return dd->send_egress_err_status_cnt[12];
3634 }
3635
3636 static u64 access_tx_sbrd_ctl_state_machine_parity_err_cnt(
3637                                 const struct cntr_entry *entry,
3638                                 void *context, int vl, int mode, u64 data)
3639 {
3640         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3641
3642         return dd->send_egress_err_status_cnt[11];
3643 }
3644
3645 static u64 access_egress_reserved_10_err_cnt(const struct cntr_entry *entry,
3646                                              void *context, int vl, int mode,
3647                                              u64 data)
3648 {
3649         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3650
3651         return dd->send_egress_err_status_cnt[10];
3652 }
3653
3654 static u64 access_egress_reserved_9_err_cnt(const struct cntr_entry *entry,
3655                                             void *context, int vl, int mode,
3656                                             u64 data)
3657 {
3658         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3659
3660         return dd->send_egress_err_status_cnt[9];
3661 }
3662
3663 static u64 access_tx_sdma_launch_intf_parity_err_cnt(
3664                                 const struct cntr_entry *entry,
3665                                 void *context, int vl, int mode, u64 data)
3666 {
3667         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3668
3669         return dd->send_egress_err_status_cnt[8];
3670 }
3671
3672 static u64 access_tx_pio_launch_intf_parity_err_cnt(
3673                                 const struct cntr_entry *entry,
3674                                 void *context, int vl, int mode, u64 data)
3675 {
3676         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3677
3678         return dd->send_egress_err_status_cnt[7];
3679 }
3680
3681 static u64 access_egress_reserved_6_err_cnt(const struct cntr_entry *entry,
3682                                             void *context, int vl, int mode,
3683                                             u64 data)
3684 {
3685         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3686
3687         return dd->send_egress_err_status_cnt[6];
3688 }
3689
3690 static u64 access_tx_incorrect_link_state_err_cnt(
3691                                 const struct cntr_entry *entry,
3692                                 void *context, int vl, int mode, u64 data)
3693 {
3694         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3695
3696         return dd->send_egress_err_status_cnt[5];
3697 }
3698
3699 static u64 access_tx_linkdown_err_cnt(const struct cntr_entry *entry,
3700                                       void *context, int vl, int mode,
3701                                       u64 data)
3702 {
3703         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3704
3705         return dd->send_egress_err_status_cnt[4];
3706 }
3707
3708 static u64 access_tx_egress_fifi_underrun_or_parity_err_cnt(
3709                                 const struct cntr_entry *entry,
3710                                 void *context, int vl, int mode, u64 data)
3711 {
3712         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3713
3714         return dd->send_egress_err_status_cnt[3];
3715 }
3716
3717 static u64 access_egress_reserved_2_err_cnt(const struct cntr_entry *entry,
3718                                             void *context, int vl, int mode,
3719                                             u64 data)
3720 {
3721         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3722
3723         return dd->send_egress_err_status_cnt[2];
3724 }
3725
3726 static u64 access_tx_pkt_integrity_mem_unc_err_cnt(
3727                                 const struct cntr_entry *entry,
3728                                 void *context, int vl, int mode, u64 data)
3729 {
3730         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3731
3732         return dd->send_egress_err_status_cnt[1];
3733 }
3734
3735 static u64 access_tx_pkt_integrity_mem_cor_err_cnt(
3736                                 const struct cntr_entry *entry,
3737                                 void *context, int vl, int mode, u64 data)
3738 {
3739         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3740
3741         return dd->send_egress_err_status_cnt[0];
3742 }
3743
3744 /*
3745  * Software counters corresponding to each of the
3746  * error status bits within SendErrStatus
3747  */
3748 static u64 access_send_csr_write_bad_addr_err_cnt(
3749                                 const struct cntr_entry *entry,
3750                                 void *context, int vl, int mode, u64 data)
3751 {
3752         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3753
3754         return dd->send_err_status_cnt[2];
3755 }
3756
3757 static u64 access_send_csr_read_bad_addr_err_cnt(const struct cntr_entry *entry,
3758                                                  void *context, int vl,
3759                                                  int mode, u64 data)
3760 {
3761         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3762
3763         return dd->send_err_status_cnt[1];
3764 }
3765
3766 static u64 access_send_csr_parity_cnt(const struct cntr_entry *entry,
3767                                       void *context, int vl, int mode,
3768                                       u64 data)
3769 {
3770         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3771
3772         return dd->send_err_status_cnt[0];
3773 }
3774
3775 /*
3776  * Software counters corresponding to each of the
3777  * error status bits within SendCtxtErrStatus
3778  */
3779 static u64 access_pio_write_out_of_bounds_err_cnt(
3780                                 const struct cntr_entry *entry,
3781                                 void *context, int vl, int mode, u64 data)
3782 {
3783         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3784
3785         return dd->sw_ctxt_err_status_cnt[4];
3786 }
3787
3788 static u64 access_pio_write_overflow_err_cnt(const struct cntr_entry *entry,
3789                                              void *context, int vl, int mode,
3790                                              u64 data)
3791 {
3792         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3793
3794         return dd->sw_ctxt_err_status_cnt[3];
3795 }
3796
3797 static u64 access_pio_write_crosses_boundary_err_cnt(
3798                                 const struct cntr_entry *entry,
3799                                 void *context, int vl, int mode, u64 data)
3800 {
3801         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3802
3803         return dd->sw_ctxt_err_status_cnt[2];
3804 }
3805
3806 static u64 access_pio_disallowed_packet_err_cnt(const struct cntr_entry *entry,
3807                                                 void *context, int vl,
3808                                                 int mode, u64 data)
3809 {
3810         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3811
3812         return dd->sw_ctxt_err_status_cnt[1];
3813 }
3814
3815 static u64 access_pio_inconsistent_sop_err_cnt(const struct cntr_entry *entry,
3816                                                void *context, int vl, int mode,
3817                                                u64 data)
3818 {
3819         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3820
3821         return dd->sw_ctxt_err_status_cnt[0];
3822 }
3823
3824 /*
3825  * Software counters corresponding to each of the
3826  * error status bits within SendDmaEngErrStatus
3827  */
3828 static u64 access_sdma_header_request_fifo_cor_err_cnt(
3829                                 const struct cntr_entry *entry,
3830                                 void *context, int vl, int mode, u64 data)
3831 {
3832         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3833
3834         return dd->sw_send_dma_eng_err_status_cnt[23];
3835 }
3836
3837 static u64 access_sdma_header_storage_cor_err_cnt(
3838                                 const struct cntr_entry *entry,
3839                                 void *context, int vl, int mode, u64 data)
3840 {
3841         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3842
3843         return dd->sw_send_dma_eng_err_status_cnt[22];
3844 }
3845
3846 static u64 access_sdma_packet_tracking_cor_err_cnt(
3847                                 const struct cntr_entry *entry,
3848                                 void *context, int vl, int mode, u64 data)
3849 {
3850         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3851
3852         return dd->sw_send_dma_eng_err_status_cnt[21];
3853 }
3854
3855 static u64 access_sdma_assembly_cor_err_cnt(const struct cntr_entry *entry,
3856                                             void *context, int vl, int mode,
3857                                             u64 data)
3858 {
3859         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3860
3861         return dd->sw_send_dma_eng_err_status_cnt[20];
3862 }
3863
3864 static u64 access_sdma_desc_table_cor_err_cnt(const struct cntr_entry *entry,
3865                                               void *context, int vl, int mode,
3866                                               u64 data)
3867 {
3868         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3869
3870         return dd->sw_send_dma_eng_err_status_cnt[19];
3871 }
3872
3873 static u64 access_sdma_header_request_fifo_unc_err_cnt(
3874                                 const struct cntr_entry *entry,
3875                                 void *context, int vl, int mode, u64 data)
3876 {
3877         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3878
3879         return dd->sw_send_dma_eng_err_status_cnt[18];
3880 }
3881
3882 static u64 access_sdma_header_storage_unc_err_cnt(
3883                                 const struct cntr_entry *entry,
3884                                 void *context, int vl, int mode, u64 data)
3885 {
3886         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3887
3888         return dd->sw_send_dma_eng_err_status_cnt[17];
3889 }
3890
3891 static u64 access_sdma_packet_tracking_unc_err_cnt(
3892                                 const struct cntr_entry *entry,
3893                                 void *context, int vl, int mode, u64 data)
3894 {
3895         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3896
3897         return dd->sw_send_dma_eng_err_status_cnt[16];
3898 }
3899
3900 static u64 access_sdma_assembly_unc_err_cnt(const struct cntr_entry *entry,
3901                                             void *context, int vl, int mode,
3902                                             u64 data)
3903 {
3904         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3905
3906         return dd->sw_send_dma_eng_err_status_cnt[15];
3907 }
3908
3909 static u64 access_sdma_desc_table_unc_err_cnt(const struct cntr_entry *entry,
3910                                               void *context, int vl, int mode,
3911                                               u64 data)
3912 {
3913         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3914
3915         return dd->sw_send_dma_eng_err_status_cnt[14];
3916 }
3917
3918 static u64 access_sdma_timeout_err_cnt(const struct cntr_entry *entry,
3919                                        void *context, int vl, int mode,
3920                                        u64 data)
3921 {
3922         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3923
3924         return dd->sw_send_dma_eng_err_status_cnt[13];
3925 }
3926
3927 static u64 access_sdma_header_length_err_cnt(const struct cntr_entry *entry,
3928                                              void *context, int vl, int mode,
3929                                              u64 data)
3930 {
3931         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3932
3933         return dd->sw_send_dma_eng_err_status_cnt[12];
3934 }
3935
3936 static u64 access_sdma_header_address_err_cnt(const struct cntr_entry *entry,
3937                                               void *context, int vl, int mode,
3938                                               u64 data)
3939 {
3940         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3941
3942         return dd->sw_send_dma_eng_err_status_cnt[11];
3943 }
3944
3945 static u64 access_sdma_header_select_err_cnt(const struct cntr_entry *entry,
3946                                              void *context, int vl, int mode,
3947                                              u64 data)
3948 {
3949         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3950
3951         return dd->sw_send_dma_eng_err_status_cnt[10];
3952 }
3953
3954 static u64 access_sdma_reserved_9_err_cnt(const struct cntr_entry *entry,
3955                                           void *context, int vl, int mode,
3956                                           u64 data)
3957 {
3958         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3959
3960         return dd->sw_send_dma_eng_err_status_cnt[9];
3961 }
3962
3963 static u64 access_sdma_packet_desc_overflow_err_cnt(
3964                                 const struct cntr_entry *entry,
3965                                 void *context, int vl, int mode, u64 data)
3966 {
3967         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3968
3969         return dd->sw_send_dma_eng_err_status_cnt[8];
3970 }
3971
3972 static u64 access_sdma_length_mismatch_err_cnt(const struct cntr_entry *entry,
3973                                                void *context, int vl,
3974                                                int mode, u64 data)
3975 {
3976         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3977
3978         return dd->sw_send_dma_eng_err_status_cnt[7];
3979 }
3980
3981 static u64 access_sdma_halt_err_cnt(const struct cntr_entry *entry,
3982                                     void *context, int vl, int mode, u64 data)
3983 {
3984         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3985
3986         return dd->sw_send_dma_eng_err_status_cnt[6];
3987 }
3988
3989 static u64 access_sdma_mem_read_err_cnt(const struct cntr_entry *entry,
3990                                         void *context, int vl, int mode,
3991                                         u64 data)
3992 {
3993         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
3994
3995         return dd->sw_send_dma_eng_err_status_cnt[5];
3996 }
3997
3998 static u64 access_sdma_first_desc_err_cnt(const struct cntr_entry *entry,
3999                                           void *context, int vl, int mode,
4000                                           u64 data)
4001 {
4002         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4003
4004         return dd->sw_send_dma_eng_err_status_cnt[4];
4005 }
4006
4007 static u64 access_sdma_tail_out_of_bounds_err_cnt(
4008                                 const struct cntr_entry *entry,
4009                                 void *context, int vl, int mode, u64 data)
4010 {
4011         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4012
4013         return dd->sw_send_dma_eng_err_status_cnt[3];
4014 }
4015
4016 static u64 access_sdma_too_long_err_cnt(const struct cntr_entry *entry,
4017                                         void *context, int vl, int mode,
4018                                         u64 data)
4019 {
4020         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4021
4022         return dd->sw_send_dma_eng_err_status_cnt[2];
4023 }
4024
4025 static u64 access_sdma_gen_mismatch_err_cnt(const struct cntr_entry *entry,
4026                                             void *context, int vl, int mode,
4027                                             u64 data)
4028 {
4029         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4030
4031         return dd->sw_send_dma_eng_err_status_cnt[1];
4032 }
4033
4034 static u64 access_sdma_wrong_dw_err_cnt(const struct cntr_entry *entry,
4035                                         void *context, int vl, int mode,
4036                                         u64 data)
4037 {
4038         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4039
4040         return dd->sw_send_dma_eng_err_status_cnt[0];
4041 }
4042
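/*
 * DcRecvErr is a hybrid counter: a read folds the software-tracked bypass
 * packet error count into the value read from the CSR (saturating at
 * CNTR_MAX), and a write resets the software count to zero after the CSR
 * write completes.
 */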
4043 static u64 access_dc_rcv_err_cnt(const struct cntr_entry *entry,
4044                                  void *context, int vl, int mode,
4045                                  u64 data)
4046 {
4047         struct hfi1_devdata *dd = (struct hfi1_devdata *)context;
4048
4049         u64 val = 0;
4050         u64 csr = entry->csr;
4051
4052         val = read_write_csr(dd, csr, mode, data);
4053         if (mode == CNTR_MODE_R) {
4054                 val = val > CNTR_MAX - dd->sw_rcv_bypass_packet_errors ?
4055                         CNTR_MAX : val + dd->sw_rcv_bypass_packet_errors;
4056         } else if (mode == CNTR_MODE_W) {
4057                 dd->sw_rcv_bypass_packet_errors = 0;
4058         } else {
4059                 dd_dev_err(dd, "Invalid cntr register access mode");
4060                 return 0;
4061         }
4062         return val;
4063 }
4064
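/*
 * def_access_sw_cpu() generates an accessor for a per-CPU software counter
 * kept in the port's ibport_data: read_write_cpu() is handed both the
 * z_<cntr> member (the reset baseline) and the live per-CPU <cntr> value for
 * the requested VL.
 */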
4065 #define def_access_sw_cpu(cntr) \
4066 static u64 access_sw_cpu_##cntr(const struct cntr_entry *entry,               \
4067                               void *context, int vl, int mode, u64 data)      \
4068 {                                                                             \
4069         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4070         return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_ ##cntr,       \
4071                               ppd->ibport_data.rvp.cntr, vl,                  \
4072                               mode, data);                                    \
4073 }
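/*
 * For reference, def_access_sw_cpu(rc_acks) below expands to roughly:
 *
 *	static u64 access_sw_cpu_rc_acks(const struct cntr_entry *entry,
 *					 void *context, int vl, int mode,
 *					 u64 data)
 *	{
 *		struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;
 *		return read_write_cpu(ppd->dd, &ppd->ibport_data.rvp.z_rc_acks,
 *				      ppd->ibport_data.rvp.rc_acks, vl,
 *				      mode, data);
 *	}
 */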
4074
4075 def_access_sw_cpu(rc_acks);
4076 def_access_sw_cpu(rc_qacks);
4077 def_access_sw_cpu(rc_delayed_comp);
4078
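/*
 * def_access_ibp_counter() generates an accessor for a plain software
 * counter in ibport_data (the n_<cntr> members).  These counters are not
 * kept per VL, so any VL other than CNTR_INVALID_VL reads back as zero.
 */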
4079 #define def_access_ibp_counter(cntr) \
4080 static u64 access_ibp_##cntr(const struct cntr_entry *entry,                  \
4081                                 void *context, int vl, int mode, u64 data)    \
4082 {                                                                             \
4083         struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)context;        \
4084                                                                               \
4085         if (vl != CNTR_INVALID_VL)                                            \
4086                 return 0;                                                     \
4087                                                                               \
4088         return read_write_sw(ppd->dd, &ppd->ibport_data.rvp.n_ ##cntr,        \
4089                              mode, data);                                     \
4090 }
4091
4092 def_access_ibp_counter(loop_pkts);
4093 def_access_ibp_counter(rc_resends);
4094 def_access_ibp_counter(rnr_naks);
4095 def_access_ibp_counter(other_naks);
4096 def_access_ibp_counter(rc_timeouts);
4097 def_access_ibp_counter(pkt_drops);
4098 def_access_ibp_counter(dmawait);
4099 def_access_ibp_counter(rc_seqnak);
4100 def_access_ibp_counter(rc_dupreq);
4101 def_access_ibp_counter(rdma_seq);
4102 def_access_ibp_counter(unaligned);
4103 def_access_ibp_counter(seq_naks);
4104
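/*
 * Device counter table, indexed by the C_* device counter enum.  Entries
 * built from the RXE32/CCE/DC_PERF element macros are presumably backed by a
 * hardware CSR, while CNTR_ELEM entries with 0 CSR offsets are software
 * counters served entirely by their access_* function.  As the flag names
 * suggest, CNTR_SYNTH, CNTR_VL, CNTR_32BIT and CNTR_SDMA select synthesized
 * 64-bit accumulation, per-VL, 32-bit and per-SDMA-engine handling in the
 * counter core.
 */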
4105 static struct cntr_entry dev_cntrs[DEV_CNTR_LAST] = {
4106 [C_RCV_OVF] = RXE32_DEV_CNTR_ELEM(RcvOverflow, RCV_BUF_OVFL_CNT, CNTR_SYNTH),
4107 [C_RX_LEN_ERR] = RXE32_DEV_CNTR_ELEM(RxLenErr, RCV_LENGTH_ERR_CNT, CNTR_SYNTH),
4108 [C_RX_ICRC_ERR] = RXE32_DEV_CNTR_ELEM(RxICrcErr, RCV_ICRC_ERR_CNT, CNTR_SYNTH),
4109 [C_RX_EBP] = RXE32_DEV_CNTR_ELEM(RxEbpCnt, RCV_EBP_CNT, CNTR_SYNTH),
4110 [C_RX_TID_FULL] = RXE32_DEV_CNTR_ELEM(RxTIDFullEr, RCV_TID_FULL_ERR_CNT,
4111                         CNTR_NORMAL),
4112 [C_RX_TID_INVALID] = RXE32_DEV_CNTR_ELEM(RxTIDInvalid, RCV_TID_VALID_ERR_CNT,
4113                         CNTR_NORMAL),
4114 [C_RX_TID_FLGMS] = RXE32_DEV_CNTR_ELEM(RxTidFLGMs,
4115                         RCV_TID_FLOW_GEN_MISMATCH_CNT,
4116                         CNTR_NORMAL),
4117 [C_RX_CTX_EGRS] = RXE32_DEV_CNTR_ELEM(RxCtxEgrS, RCV_CONTEXT_EGR_STALL,
4118                         CNTR_NORMAL),
4119 [C_RCV_TID_FLSMS] = RXE32_DEV_CNTR_ELEM(RxTidFLSMs,
4120                         RCV_TID_FLOW_SEQ_MISMATCH_CNT, CNTR_NORMAL),
4121 [C_CCE_PCI_CR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciCrSt,
4122                         CCE_PCIE_POSTED_CRDT_STALL_CNT, CNTR_NORMAL),
4123 [C_CCE_PCI_TR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePciTrSt, CCE_PCIE_TRGT_STALL_CNT,
4124                         CNTR_NORMAL),
4125 [C_CCE_PIO_WR_ST] = CCE_PERF_DEV_CNTR_ELEM(CcePioWrSt, CCE_PIO_WR_STALL_CNT,
4126                         CNTR_NORMAL),
4127 [C_CCE_ERR_INT] = CCE_INT_DEV_CNTR_ELEM(CceErrInt, CCE_ERR_INT_CNT,
4128                         CNTR_NORMAL),
4129 [C_CCE_SDMA_INT] = CCE_INT_DEV_CNTR_ELEM(CceSdmaInt, CCE_SDMA_INT_CNT,
4130                         CNTR_NORMAL),
4131 [C_CCE_MISC_INT] = CCE_INT_DEV_CNTR_ELEM(CceMiscInt, CCE_MISC_INT_CNT,
4132                         CNTR_NORMAL),
4133 [C_CCE_RCV_AV_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvAvInt, CCE_RCV_AVAIL_INT_CNT,
4134                         CNTR_NORMAL),
4135 [C_CCE_RCV_URG_INT] = CCE_INT_DEV_CNTR_ELEM(CceRcvUrgInt,
4136                         CCE_RCV_URGENT_INT_CNT, CNTR_NORMAL),
4137 [C_CCE_SEND_CR_INT] = CCE_INT_DEV_CNTR_ELEM(CceSndCrInt,
4138                         CCE_SEND_CREDIT_INT_CNT, CNTR_NORMAL),
4139 [C_DC_UNC_ERR] = DC_PERF_CNTR(DcUnctblErr, DCC_ERR_UNCORRECTABLE_CNT,
4140                               CNTR_SYNTH),
4141 [C_DC_RCV_ERR] = CNTR_ELEM("DcRecvErr", DCC_ERR_PORTRCV_ERR_CNT, 0, CNTR_SYNTH,
4142                             access_dc_rcv_err_cnt),
4143 [C_DC_FM_CFG_ERR] = DC_PERF_CNTR(DcFmCfgErr, DCC_ERR_FMCONFIG_ERR_CNT,
4144                                  CNTR_SYNTH),
4145 [C_DC_RMT_PHY_ERR] = DC_PERF_CNTR(DcRmtPhyErr, DCC_ERR_RCVREMOTE_PHY_ERR_CNT,
4146                                   CNTR_SYNTH),
4147 [C_DC_DROPPED_PKT] = DC_PERF_CNTR(DcDroppedPkt, DCC_ERR_DROPPED_PKT_CNT,
4148                                   CNTR_SYNTH),
4149 [C_DC_MC_XMIT_PKTS] = DC_PERF_CNTR(DcMcXmitPkts,
4150                                    DCC_PRF_PORT_XMIT_MULTICAST_CNT, CNTR_SYNTH),
4151 [C_DC_MC_RCV_PKTS] = DC_PERF_CNTR(DcMcRcvPkts,
4152                                   DCC_PRF_PORT_RCV_MULTICAST_PKT_CNT,
4153                                   CNTR_SYNTH),
4154 [C_DC_XMIT_CERR] = DC_PERF_CNTR(DcXmitCorr,
4155                                 DCC_PRF_PORT_XMIT_CORRECTABLE_CNT, CNTR_SYNTH),
4156 [C_DC_RCV_CERR] = DC_PERF_CNTR(DcRcvCorrCnt, DCC_PRF_PORT_RCV_CORRECTABLE_CNT,
4157                                CNTR_SYNTH),
4158 [C_DC_RCV_FCC] = DC_PERF_CNTR(DcRxFCntl, DCC_PRF_RX_FLOW_CRTL_CNT,
4159                               CNTR_SYNTH),
4160 [C_DC_XMIT_FCC] = DC_PERF_CNTR(DcXmitFCntl, DCC_PRF_TX_FLOW_CRTL_CNT,
4161                                CNTR_SYNTH),
4162 [C_DC_XMIT_FLITS] = DC_PERF_CNTR(DcXmitFlits, DCC_PRF_PORT_XMIT_DATA_CNT,
4163                                  CNTR_SYNTH),
4164 [C_DC_RCV_FLITS] = DC_PERF_CNTR(DcRcvFlits, DCC_PRF_PORT_RCV_DATA_CNT,
4165                                 CNTR_SYNTH),
4166 [C_DC_XMIT_PKTS] = DC_PERF_CNTR(DcXmitPkts, DCC_PRF_PORT_XMIT_PKTS_CNT,
4167                                 CNTR_SYNTH),
4168 [C_DC_RCV_PKTS] = DC_PERF_CNTR(DcRcvPkts, DCC_PRF_PORT_RCV_PKTS_CNT,
4169                                CNTR_SYNTH),
4170 [C_DC_RX_FLIT_VL] = DC_PERF_CNTR(DcRxFlitVl, DCC_PRF_PORT_VL_RCV_DATA_CNT,
4171                                  CNTR_SYNTH | CNTR_VL),
4172 [C_DC_RX_PKT_VL] = DC_PERF_CNTR(DcRxPktVl, DCC_PRF_PORT_VL_RCV_PKTS_CNT,
4173                                 CNTR_SYNTH | CNTR_VL),
4174 [C_DC_RCV_FCN] = DC_PERF_CNTR(DcRcvFcn, DCC_PRF_PORT_RCV_FECN_CNT, CNTR_SYNTH),
4175 [C_DC_RCV_FCN_VL] = DC_PERF_CNTR(DcRcvFcnVl, DCC_PRF_PORT_VL_RCV_FECN_CNT,
4176                                  CNTR_SYNTH | CNTR_VL),
4177 [C_DC_RCV_BCN] = DC_PERF_CNTR(DcRcvBcn, DCC_PRF_PORT_RCV_BECN_CNT, CNTR_SYNTH),
4178 [C_DC_RCV_BCN_VL] = DC_PERF_CNTR(DcRcvBcnVl, DCC_PRF_PORT_VL_RCV_BECN_CNT,
4179                                  CNTR_SYNTH | CNTR_VL),
4180 [C_DC_RCV_BBL] = DC_PERF_CNTR(DcRcvBbl, DCC_PRF_PORT_RCV_BUBBLE_CNT,
4181                               CNTR_SYNTH),
4182 [C_DC_RCV_BBL_VL] = DC_PERF_CNTR(DcRcvBblVl, DCC_PRF_PORT_VL_RCV_BUBBLE_CNT,
4183                                  CNTR_SYNTH | CNTR_VL),
4184 [C_DC_MARK_FECN] = DC_PERF_CNTR(DcMarkFcn, DCC_PRF_PORT_MARK_FECN_CNT,
4185                                 CNTR_SYNTH),
4186 [C_DC_MARK_FECN_VL] = DC_PERF_CNTR(DcMarkFcnVl, DCC_PRF_PORT_VL_MARK_FECN_CNT,
4187                                    CNTR_SYNTH | CNTR_VL),
4188 [C_DC_TOTAL_CRC] =
4189         DC_PERF_CNTR_LCB(DcTotCrc, DC_LCB_ERR_INFO_TOTAL_CRC_ERR,
4190                          CNTR_SYNTH),
4191 [C_DC_CRC_LN0] = DC_PERF_CNTR_LCB(DcCrcLn0, DC_LCB_ERR_INFO_CRC_ERR_LN0,
4192                                   CNTR_SYNTH),
4193 [C_DC_CRC_LN1] = DC_PERF_CNTR_LCB(DcCrcLn1, DC_LCB_ERR_INFO_CRC_ERR_LN1,
4194                                   CNTR_SYNTH),
4195 [C_DC_CRC_LN2] = DC_PERF_CNTR_LCB(DcCrcLn2, DC_LCB_ERR_INFO_CRC_ERR_LN2,
4196                                   CNTR_SYNTH),
4197 [C_DC_CRC_LN3] = DC_PERF_CNTR_LCB(DcCrcLn3, DC_LCB_ERR_INFO_CRC_ERR_LN3,
4198                                   CNTR_SYNTH),
4199 [C_DC_CRC_MULT_LN] =
4200         DC_PERF_CNTR_LCB(DcMultLn, DC_LCB_ERR_INFO_CRC_ERR_MULTI_LN,
4201                          CNTR_SYNTH),
4202 [C_DC_TX_REPLAY] = DC_PERF_CNTR_LCB(DcTxReplay, DC_LCB_ERR_INFO_TX_REPLAY_CNT,
4203                                     CNTR_SYNTH),
4204 [C_DC_RX_REPLAY] = DC_PERF_CNTR_LCB(DcRxReplay, DC_LCB_ERR_INFO_RX_REPLAY_CNT,
4205                                     CNTR_SYNTH),
4206 [C_DC_SEQ_CRC_CNT] =
4207         DC_PERF_CNTR_LCB(DcLinkSeqCrc, DC_LCB_ERR_INFO_SEQ_CRC_CNT,
4208                          CNTR_SYNTH),
4209 [C_DC_ESC0_ONLY_CNT] =
4210         DC_PERF_CNTR_LCB(DcEsc0, DC_LCB_ERR_INFO_ESCAPE_0_ONLY_CNT,
4211                          CNTR_SYNTH),
4212 [C_DC_ESC0_PLUS1_CNT] =
4213         DC_PERF_CNTR_LCB(DcEsc1, DC_LCB_ERR_INFO_ESCAPE_0_PLUS1_CNT,
4214                          CNTR_SYNTH),
4215 [C_DC_ESC0_PLUS2_CNT] =
4216         DC_PERF_CNTR_LCB(DcEsc0Plus2, DC_LCB_ERR_INFO_ESCAPE_0_PLUS2_CNT,
4217                          CNTR_SYNTH),
4218 [C_DC_REINIT_FROM_PEER_CNT] =
4219         DC_PERF_CNTR_LCB(DcReinitPeer, DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT,
4220                          CNTR_SYNTH),
4221 [C_DC_SBE_CNT] = DC_PERF_CNTR_LCB(DcSbe, DC_LCB_ERR_INFO_SBE_CNT,
4222                                   CNTR_SYNTH),
4223 [C_DC_MISC_FLG_CNT] =
4224         DC_PERF_CNTR_LCB(DcMiscFlg, DC_LCB_ERR_INFO_MISC_FLG_CNT,
4225                          CNTR_SYNTH),
4226 [C_DC_PRF_GOOD_LTP_CNT] =
4227         DC_PERF_CNTR_LCB(DcGoodLTP, DC_LCB_PRF_GOOD_LTP_CNT, CNTR_SYNTH),
4228 [C_DC_PRF_ACCEPTED_LTP_CNT] =
4229         DC_PERF_CNTR_LCB(DcAccLTP, DC_LCB_PRF_ACCEPTED_LTP_CNT,
4230                          CNTR_SYNTH),
4231 [C_DC_PRF_RX_FLIT_CNT] =
4232         DC_PERF_CNTR_LCB(DcPrfRxFlit, DC_LCB_PRF_RX_FLIT_CNT, CNTR_SYNTH),
4233 [C_DC_PRF_TX_FLIT_CNT] =
4234         DC_PERF_CNTR_LCB(DcPrfTxFlit, DC_LCB_PRF_TX_FLIT_CNT, CNTR_SYNTH),
4235 [C_DC_PRF_CLK_CNTR] =
4236         DC_PERF_CNTR_LCB(DcPrfClk, DC_LCB_PRF_CLK_CNTR, CNTR_SYNTH),
4237 [C_DC_PG_DBG_FLIT_CRDTS_CNT] =
4238         DC_PERF_CNTR_LCB(DcFltCrdts, DC_LCB_PG_DBG_FLIT_CRDTS_CNT, CNTR_SYNTH),
4239 [C_DC_PG_STS_PAUSE_COMPLETE_CNT] =
4240         DC_PERF_CNTR_LCB(DcPauseComp, DC_LCB_PG_STS_PAUSE_COMPLETE_CNT,
4241                          CNTR_SYNTH),
4242 [C_DC_PG_STS_TX_SBE_CNT] =
4243         DC_PERF_CNTR_LCB(DcStsTxSbe, DC_LCB_PG_STS_TX_SBE_CNT, CNTR_SYNTH),
4244 [C_DC_PG_STS_TX_MBE_CNT] =
4245         DC_PERF_CNTR_LCB(DcStsTxMbe, DC_LCB_PG_STS_TX_MBE_CNT,
4246                          CNTR_SYNTH),
4247 [C_SW_CPU_INTR] = CNTR_ELEM("Intr", 0, 0, CNTR_NORMAL,
4248                             access_sw_cpu_intr),
4249 [C_SW_CPU_RCV_LIM] = CNTR_ELEM("RcvLimit", 0, 0, CNTR_NORMAL,
4250                             access_sw_cpu_rcv_limit),
4251 [C_SW_VTX_WAIT] = CNTR_ELEM("vTxWait", 0, 0, CNTR_NORMAL,
4252                             access_sw_vtx_wait),
4253 [C_SW_PIO_WAIT] = CNTR_ELEM("PioWait", 0, 0, CNTR_NORMAL,
4254                             access_sw_pio_wait),
4255 [C_SW_PIO_DRAIN] = CNTR_ELEM("PioDrain", 0, 0, CNTR_NORMAL,
4256                             access_sw_pio_drain),
4257 [C_SW_KMEM_WAIT] = CNTR_ELEM("KmemWait", 0, 0, CNTR_NORMAL,
4258                             access_sw_kmem_wait),
4259 [C_SW_TID_WAIT] = CNTR_ELEM("TidWait", 0, 0, CNTR_NORMAL,
4260                             hfi1_access_sw_tid_wait),
4261 [C_SW_SEND_SCHED] = CNTR_ELEM("SendSched", 0, 0, CNTR_NORMAL,
4262                             access_sw_send_schedule),
4263 [C_SDMA_DESC_FETCHED_CNT] = CNTR_ELEM("SDEDscFdCn",
4264                                       SEND_DMA_DESC_FETCHED_CNT, 0,
4265                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4266                                       dev_access_u32_csr),
4267 [C_SDMA_INT_CNT] = CNTR_ELEM("SDMAInt", 0, 0,
4268                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4269                              access_sde_int_cnt),
4270 [C_SDMA_ERR_CNT] = CNTR_ELEM("SDMAErrCt", 0, 0,
4271                              CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4272                              access_sde_err_cnt),
4273 [C_SDMA_IDLE_INT_CNT] = CNTR_ELEM("SDMAIdInt", 0, 0,
4274                                   CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4275                                   access_sde_idle_int_cnt),
4276 [C_SDMA_PROGRESS_INT_CNT] = CNTR_ELEM("SDMAPrIntCn", 0, 0,
4277                                       CNTR_NORMAL | CNTR_32BIT | CNTR_SDMA,
4278                                       access_sde_progress_int_cnt),
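/*
 * The following sections of the table shadow the per-bit error-status
 * registers (MISC_ERR_STATUS, CceErrStatus, RcvErrStatus and the
 * Send*ErrStatus group): one CNTR_ELEM per status bit, each returning a
 * software count that is presumably bumped by the corresponding error
 * interrupt handler elsewhere in this file.
 */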
4279 /* MISC_ERR_STATUS */
4280 [C_MISC_PLL_LOCK_FAIL_ERR] = CNTR_ELEM("MISC_PLL_LOCK_FAIL_ERR", 0, 0,
4281                                 CNTR_NORMAL,
4282                                 access_misc_pll_lock_fail_err_cnt),
4283 [C_MISC_MBIST_FAIL_ERR] = CNTR_ELEM("MISC_MBIST_FAIL_ERR", 0, 0,
4284                                 CNTR_NORMAL,
4285                                 access_misc_mbist_fail_err_cnt),
4286 [C_MISC_INVALID_EEP_CMD_ERR] = CNTR_ELEM("MISC_INVALID_EEP_CMD_ERR", 0, 0,
4287                                 CNTR_NORMAL,
4288                                 access_misc_invalid_eep_cmd_err_cnt),
4289 [C_MISC_EFUSE_DONE_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_DONE_PARITY_ERR", 0, 0,
4290                                 CNTR_NORMAL,
4291                                 access_misc_efuse_done_parity_err_cnt),
4292 [C_MISC_EFUSE_WRITE_ERR] = CNTR_ELEM("MISC_EFUSE_WRITE_ERR", 0, 0,
4293                                 CNTR_NORMAL,
4294                                 access_misc_efuse_write_err_cnt),
4295 [C_MISC_EFUSE_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_EFUSE_READ_BAD_ADDR_ERR", 0,
4296                                 0, CNTR_NORMAL,
4297                                 access_misc_efuse_read_bad_addr_err_cnt),
4298 [C_MISC_EFUSE_CSR_PARITY_ERR] = CNTR_ELEM("MISC_EFUSE_CSR_PARITY_ERR", 0, 0,
4299                                 CNTR_NORMAL,
4300                                 access_misc_efuse_csr_parity_err_cnt),
4301 [C_MISC_FW_AUTH_FAILED_ERR] = CNTR_ELEM("MISC_FW_AUTH_FAILED_ERR", 0, 0,
4302                                 CNTR_NORMAL,
4303                                 access_misc_fw_auth_failed_err_cnt),
4304 [C_MISC_KEY_MISMATCH_ERR] = CNTR_ELEM("MISC_KEY_MISMATCH_ERR", 0, 0,
4305                                 CNTR_NORMAL,
4306                                 access_misc_key_mismatch_err_cnt),
4307 [C_MISC_SBUS_WRITE_FAILED_ERR] = CNTR_ELEM("MISC_SBUS_WRITE_FAILED_ERR", 0, 0,
4308                                 CNTR_NORMAL,
4309                                 access_misc_sbus_write_failed_err_cnt),
4310 [C_MISC_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_WRITE_BAD_ADDR_ERR", 0, 0,
4311                                 CNTR_NORMAL,
4312                                 access_misc_csr_write_bad_addr_err_cnt),
4313 [C_MISC_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("MISC_CSR_READ_BAD_ADDR_ERR", 0, 0,
4314                                 CNTR_NORMAL,
4315                                 access_misc_csr_read_bad_addr_err_cnt),
4316 [C_MISC_CSR_PARITY_ERR] = CNTR_ELEM("MISC_CSR_PARITY_ERR", 0, 0,
4317                                 CNTR_NORMAL,
4318                                 access_misc_csr_parity_err_cnt),
4319 /* CceErrStatus */
4320 [C_CCE_ERR_STATUS_AGGREGATED_CNT] = CNTR_ELEM("CceErrStatusAggregatedCnt", 0, 0,
4321                                 CNTR_NORMAL,
4322                                 access_sw_cce_err_status_aggregated_cnt),
4323 [C_CCE_MSIX_CSR_PARITY_ERR] = CNTR_ELEM("CceMsixCsrParityErr", 0, 0,
4324                                 CNTR_NORMAL,
4325                                 access_cce_msix_csr_parity_err_cnt),
4326 [C_CCE_INT_MAP_UNC_ERR] = CNTR_ELEM("CceIntMapUncErr", 0, 0,
4327                                 CNTR_NORMAL,
4328                                 access_cce_int_map_unc_err_cnt),
4329 [C_CCE_INT_MAP_COR_ERR] = CNTR_ELEM("CceIntMapCorErr", 0, 0,
4330                                 CNTR_NORMAL,
4331                                 access_cce_int_map_cor_err_cnt),
4332 [C_CCE_MSIX_TABLE_UNC_ERR] = CNTR_ELEM("CceMsixTableUncErr", 0, 0,
4333                                 CNTR_NORMAL,
4334                                 access_cce_msix_table_unc_err_cnt),
4335 [C_CCE_MSIX_TABLE_COR_ERR] = CNTR_ELEM("CceMsixTableCorErr", 0, 0,
4336                                 CNTR_NORMAL,
4337                                 access_cce_msix_table_cor_err_cnt),
4338 [C_CCE_RXDMA_CONV_FIFO_PARITY_ERR] = CNTR_ELEM("CceRxdmaConvFifoParityErr", 0,
4339                                 0, CNTR_NORMAL,
4340                                 access_cce_rxdma_conv_fifo_parity_err_cnt),
4341 [C_CCE_RCPL_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceRcplAsyncFifoParityErr", 0,
4342                                 0, CNTR_NORMAL,
4343                                 access_cce_rcpl_async_fifo_parity_err_cnt),
4344 [C_CCE_SEG_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceSegWriteBadAddrErr", 0, 0,
4345                                 CNTR_NORMAL,
4346                                 access_cce_seg_write_bad_addr_err_cnt),
4347 [C_CCE_SEG_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceSegReadBadAddrErr", 0, 0,
4348                                 CNTR_NORMAL,
4349                                 access_cce_seg_read_bad_addr_err_cnt),
4350 [C_LA_TRIGGERED] = CNTR_ELEM("Cce LATriggered", 0, 0,
4351                                 CNTR_NORMAL,
4352                                 access_la_triggered_cnt),
4353 [C_CCE_TRGT_CPL_TIMEOUT_ERR] = CNTR_ELEM("CceTrgtCplTimeoutErr", 0, 0,
4354                                 CNTR_NORMAL,
4355                                 access_cce_trgt_cpl_timeout_err_cnt),
4356 [C_PCIC_RECEIVE_PARITY_ERR] = CNTR_ELEM("PcicReceiveParityErr", 0, 0,
4357                                 CNTR_NORMAL,
4358                                 access_pcic_receive_parity_err_cnt),
4359 [C_PCIC_TRANSMIT_BACK_PARITY_ERR] = CNTR_ELEM("PcicTransmitBackParityErr", 0, 0,
4360                                 CNTR_NORMAL,
4361                                 access_pcic_transmit_back_parity_err_cnt),
4362 [C_PCIC_TRANSMIT_FRONT_PARITY_ERR] = CNTR_ELEM("PcicTransmitFrontParityErr", 0,
4363                                 0, CNTR_NORMAL,
4364                                 access_pcic_transmit_front_parity_err_cnt),
4365 [C_PCIC_CPL_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicCplDatQUncErr", 0, 0,
4366                                 CNTR_NORMAL,
4367                                 access_pcic_cpl_dat_q_unc_err_cnt),
4368 [C_PCIC_CPL_HD_Q_UNC_ERR] = CNTR_ELEM("PcicCplHdQUncErr", 0, 0,
4369                                 CNTR_NORMAL,
4370                                 access_pcic_cpl_hd_q_unc_err_cnt),
4371 [C_PCIC_POST_DAT_Q_UNC_ERR] = CNTR_ELEM("PcicPostDatQUncErr", 0, 0,
4372                                 CNTR_NORMAL,
4373                                 access_pcic_post_dat_q_unc_err_cnt),
4374 [C_PCIC_POST_HD_Q_UNC_ERR] = CNTR_ELEM("PcicPostHdQUncErr", 0, 0,
4375                                 CNTR_NORMAL,
4376                                 access_pcic_post_hd_q_unc_err_cnt),
4377 [C_PCIC_RETRY_SOT_MEM_UNC_ERR] = CNTR_ELEM("PcicRetrySotMemUncErr", 0, 0,
4378                                 CNTR_NORMAL,
4379                                 access_pcic_retry_sot_mem_unc_err_cnt),
4380 [C_PCIC_RETRY_MEM_UNC_ERR] = CNTR_ELEM("PcicRetryMemUncErr", 0, 0,
4381                                 CNTR_NORMAL,
4382                                 access_pcic_retry_mem_unc_err),
4383 [C_PCIC_N_POST_DAT_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostDatQParityErr", 0, 0,
4384                                 CNTR_NORMAL,
4385                                 access_pcic_n_post_dat_q_parity_err_cnt),
4386 [C_PCIC_N_POST_H_Q_PARITY_ERR] = CNTR_ELEM("PcicNPostHQParityErr", 0, 0,
4387                                 CNTR_NORMAL,
4388                                 access_pcic_n_post_h_q_parity_err_cnt),
4389 [C_PCIC_CPL_DAT_Q_COR_ERR] = CNTR_ELEM("PcicCplDatQCorErr", 0, 0,
4390                                 CNTR_NORMAL,
4391                                 access_pcic_cpl_dat_q_cor_err_cnt),
4392 [C_PCIC_CPL_HD_Q_COR_ERR] = CNTR_ELEM("PcicCplHdQCorErr", 0, 0,
4393                                 CNTR_NORMAL,
4394                                 access_pcic_cpl_hd_q_cor_err_cnt),
4395 [C_PCIC_POST_DAT_Q_COR_ERR] = CNTR_ELEM("PcicPostDatQCorErr", 0, 0,
4396                                 CNTR_NORMAL,
4397                                 access_pcic_post_dat_q_cor_err_cnt),
4398 [C_PCIC_POST_HD_Q_COR_ERR] = CNTR_ELEM("PcicPostHdQCorErr", 0, 0,
4399                                 CNTR_NORMAL,
4400                                 access_pcic_post_hd_q_cor_err_cnt),
4401 [C_PCIC_RETRY_SOT_MEM_COR_ERR] = CNTR_ELEM("PcicRetrySotMemCorErr", 0, 0,
4402                                 CNTR_NORMAL,
4403                                 access_pcic_retry_sot_mem_cor_err_cnt),
4404 [C_PCIC_RETRY_MEM_COR_ERR] = CNTR_ELEM("PcicRetryMemCorErr", 0, 0,
4405                                 CNTR_NORMAL,
4406                                 access_pcic_retry_mem_cor_err_cnt),
4407 [C_CCE_CLI1_ASYNC_FIFO_DBG_PARITY_ERR] = CNTR_ELEM(
4408                                 "CceCli1AsyncFifoDbgParityError", 0, 0,
4409                                 CNTR_NORMAL,
4410                                 access_cce_cli1_async_fifo_dbg_parity_err_cnt),
4411 [C_CCE_CLI1_ASYNC_FIFO_RXDMA_PARITY_ERR] = CNTR_ELEM(
4412                                 "CceCli1AsyncFifoRxdmaParityError", 0, 0,
4413                                 CNTR_NORMAL,
4414                                 access_cce_cli1_async_fifo_rxdma_parity_err_cnt
4415                                 ),
4416 [C_CCE_CLI1_ASYNC_FIFO_SDMA_HD_PARITY_ERR] = CNTR_ELEM(
4417                         "CceCli1AsyncFifoSdmaHdParityErr", 0, 0,
4418                         CNTR_NORMAL,
4419                         access_cce_cli1_async_fifo_sdma_hd_parity_err_cnt),
4420 [C_CCE_CLI1_ASYNC_FIFO_PIO_CRDT_PARITY_ERR] = CNTR_ELEM(
4421                         "CceCli1AsyncFifoPioCrdtParityErr", 0, 0,
4422                         CNTR_NORMAL,
4423                         access_cce_cl1_async_fifo_pio_crdt_parity_err_cnt),
4424 [C_CCE_CLI2_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceCli2AsyncFifoParityErr", 0,
4425                         0, CNTR_NORMAL,
4426                         access_cce_cli2_async_fifo_parity_err_cnt),
4427 [C_CCE_CSR_CFG_BUS_PARITY_ERR] = CNTR_ELEM("CceCsrCfgBusParityErr", 0, 0,
4428                         CNTR_NORMAL,
4429                         access_cce_csr_cfg_bus_parity_err_cnt),
4430 [C_CCE_CLI0_ASYNC_FIFO_PARTIY_ERR] = CNTR_ELEM("CceCli0AsyncFifoParityErr", 0,
4431                         0, CNTR_NORMAL,
4432                         access_cce_cli0_async_fifo_parity_err_cnt),
4433 [C_CCE_RSPD_DATA_PARITY_ERR] = CNTR_ELEM("CceRspdDataParityErr", 0, 0,
4434                         CNTR_NORMAL,
4435                         access_cce_rspd_data_parity_err_cnt),
4436 [C_CCE_TRGT_ACCESS_ERR] = CNTR_ELEM("CceTrgtAccessErr", 0, 0,
4437                         CNTR_NORMAL,
4438                         access_cce_trgt_access_err_cnt),
4439 [C_CCE_TRGT_ASYNC_FIFO_PARITY_ERR] = CNTR_ELEM("CceTrgtAsyncFifoParityErr", 0,
4440                         0, CNTR_NORMAL,
4441                         access_cce_trgt_async_fifo_parity_err_cnt),
4442 [C_CCE_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrWriteBadAddrErr", 0, 0,
4443                         CNTR_NORMAL,
4444                         access_cce_csr_write_bad_addr_err_cnt),
4445 [C_CCE_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("CceCsrReadBadAddrErr", 0, 0,
4446                         CNTR_NORMAL,
4447                         access_cce_csr_read_bad_addr_err_cnt),
4448 [C_CCE_CSR_PARITY_ERR] = CNTR_ELEM("CceCsrParityErr", 0, 0,
4449                         CNTR_NORMAL,
4450                         access_ccs_csr_parity_err_cnt),
4451
4452 /* RcvErrStatus */
4453 [C_RX_CSR_PARITY_ERR] = CNTR_ELEM("RxCsrParityErr", 0, 0,
4454                         CNTR_NORMAL,
4455                         access_rx_csr_parity_err_cnt),
4456 [C_RX_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrWriteBadAddrErr", 0, 0,
4457                         CNTR_NORMAL,
4458                         access_rx_csr_write_bad_addr_err_cnt),
4459 [C_RX_CSR_READ_BAD_ADDR_ERR] = CNTR_ELEM("RxCsrReadBadAddrErr", 0, 0,
4460                         CNTR_NORMAL,
4461                         access_rx_csr_read_bad_addr_err_cnt),
4462 [C_RX_DMA_CSR_UNC_ERR] = CNTR_ELEM("RxDmaCsrUncErr", 0, 0,
4463                         CNTR_NORMAL,
4464                         access_rx_dma_csr_unc_err_cnt),
4465 [C_RX_DMA_DQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaDqFsmEncodingErr", 0, 0,
4466                         CNTR_NORMAL,
4467                         access_rx_dma_dq_fsm_encoding_err_cnt),
4468 [C_RX_DMA_EQ_FSM_ENCODING_ERR] = CNTR_ELEM("RxDmaEqFsmEncodingErr", 0, 0,
4469                         CNTR_NORMAL,
4470                         access_rx_dma_eq_fsm_encoding_err_cnt),
4471 [C_RX_DMA_CSR_PARITY_ERR] = CNTR_ELEM("RxDmaCsrParityErr", 0, 0,
4472                         CNTR_NORMAL,
4473                         access_rx_dma_csr_parity_err_cnt),
4474 [C_RX_RBUF_DATA_COR_ERR] = CNTR_ELEM("RxRbufDataCorErr", 0, 0,
4475                         CNTR_NORMAL,
4476                         access_rx_rbuf_data_cor_err_cnt),
4477 [C_RX_RBUF_DATA_UNC_ERR] = CNTR_ELEM("RxRbufDataUncErr", 0, 0,
4478                         CNTR_NORMAL,
4479                         access_rx_rbuf_data_unc_err_cnt),
4480 [C_RX_DMA_DATA_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaDataFifoRdCorErr", 0, 0,
4481                         CNTR_NORMAL,
4482                         access_rx_dma_data_fifo_rd_cor_err_cnt),
4483 [C_RX_DMA_DATA_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaDataFifoRdUncErr", 0, 0,
4484                         CNTR_NORMAL,
4485                         access_rx_dma_data_fifo_rd_unc_err_cnt),
4486 [C_RX_DMA_HDR_FIFO_RD_COR_ERR] = CNTR_ELEM("RxDmaHdrFifoRdCorErr", 0, 0,
4487                         CNTR_NORMAL,
4488                         access_rx_dma_hdr_fifo_rd_cor_err_cnt),
4489 [C_RX_DMA_HDR_FIFO_RD_UNC_ERR] = CNTR_ELEM("RxDmaHdrFifoRdUncErr", 0, 0,
4490                         CNTR_NORMAL,
4491                         access_rx_dma_hdr_fifo_rd_unc_err_cnt),
4492 [C_RX_RBUF_DESC_PART2_COR_ERR] = CNTR_ELEM("RxRbufDescPart2CorErr", 0, 0,
4493                         CNTR_NORMAL,
4494                         access_rx_rbuf_desc_part2_cor_err_cnt),
4495 [C_RX_RBUF_DESC_PART2_UNC_ERR] = CNTR_ELEM("RxRbufDescPart2UncErr", 0, 0,
4496                         CNTR_NORMAL,
4497                         access_rx_rbuf_desc_part2_unc_err_cnt),
4498 [C_RX_RBUF_DESC_PART1_COR_ERR] = CNTR_ELEM("RxRbufDescPart1CorErr", 0, 0,
4499                         CNTR_NORMAL,
4500                         access_rx_rbuf_desc_part1_cor_err_cnt),
4501 [C_RX_RBUF_DESC_PART1_UNC_ERR] = CNTR_ELEM("RxRbufDescPart1UncErr", 0, 0,
4502                         CNTR_NORMAL,
4503                         access_rx_rbuf_desc_part1_unc_err_cnt),
4504 [C_RX_HQ_INTR_FSM_ERR] = CNTR_ELEM("RxHqIntrFsmErr", 0, 0,
4505                         CNTR_NORMAL,
4506                         access_rx_hq_intr_fsm_err_cnt),
4507 [C_RX_HQ_INTR_CSR_PARITY_ERR] = CNTR_ELEM("RxHqIntrCsrParityErr", 0, 0,
4508                         CNTR_NORMAL,
4509                         access_rx_hq_intr_csr_parity_err_cnt),
4510 [C_RX_LOOKUP_CSR_PARITY_ERR] = CNTR_ELEM("RxLookupCsrParityErr", 0, 0,
4511                         CNTR_NORMAL,
4512                         access_rx_lookup_csr_parity_err_cnt),
4513 [C_RX_LOOKUP_RCV_ARRAY_COR_ERR] = CNTR_ELEM("RxLookupRcvArrayCorErr", 0, 0,
4514                         CNTR_NORMAL,
4515                         access_rx_lookup_rcv_array_cor_err_cnt),
4516 [C_RX_LOOKUP_RCV_ARRAY_UNC_ERR] = CNTR_ELEM("RxLookupRcvArrayUncErr", 0, 0,
4517                         CNTR_NORMAL,
4518                         access_rx_lookup_rcv_array_unc_err_cnt),
4519 [C_RX_LOOKUP_DES_PART2_PARITY_ERR] = CNTR_ELEM("RxLookupDesPart2ParityErr", 0,
4520                         0, CNTR_NORMAL,
4521                         access_rx_lookup_des_part2_parity_err_cnt),
4522 [C_RX_LOOKUP_DES_PART1_UNC_COR_ERR] = CNTR_ELEM("RxLookupDesPart1UncCorErr", 0,
4523                         0, CNTR_NORMAL,
4524                         access_rx_lookup_des_part1_unc_cor_err_cnt),
4525 [C_RX_LOOKUP_DES_PART1_UNC_ERR] = CNTR_ELEM("RxLookupDesPart1UncErr", 0, 0,
4526                         CNTR_NORMAL,
4527                         access_rx_lookup_des_part1_unc_err_cnt),
4528 [C_RX_RBUF_NEXT_FREE_BUF_COR_ERR] = CNTR_ELEM("RxRbufNextFreeBufCorErr", 0, 0,
4529                         CNTR_NORMAL,
4530                         access_rx_rbuf_next_free_buf_cor_err_cnt),
4531 [C_RX_RBUF_NEXT_FREE_BUF_UNC_ERR] = CNTR_ELEM("RxRbufNextFreeBufUncErr", 0, 0,
4532                         CNTR_NORMAL,
4533                         access_rx_rbuf_next_free_buf_unc_err_cnt),
4534 [C_RX_RBUF_FL_INIT_WR_ADDR_PARITY_ERR] = CNTR_ELEM(
4535                         "RxRbufFlInitWrAddrParityErr", 0, 0,
4536                         CNTR_NORMAL,
4537                         access_rbuf_fl_init_wr_addr_parity_err_cnt),
4538 [C_RX_RBUF_FL_INITDONE_PARITY_ERR] = CNTR_ELEM("RxRbufFlInitdoneParityErr", 0,
4539                         0, CNTR_NORMAL,
4540                         access_rx_rbuf_fl_initdone_parity_err_cnt),
4541 [C_RX_RBUF_FL_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlWrAddrParityErr", 0,
4542                         0, CNTR_NORMAL,
4543                         access_rx_rbuf_fl_write_addr_parity_err_cnt),
4544 [C_RX_RBUF_FL_RD_ADDR_PARITY_ERR] = CNTR_ELEM("RxRbufFlRdAddrParityErr", 0, 0,
4545                         CNTR_NORMAL,
4546                         access_rx_rbuf_fl_rd_addr_parity_err_cnt),
4547 [C_RX_RBUF_EMPTY_ERR] = CNTR_ELEM("RxRbufEmptyErr", 0, 0,
4548                         CNTR_NORMAL,
4549                         access_rx_rbuf_empty_err_cnt),
4550 [C_RX_RBUF_FULL_ERR] = CNTR_ELEM("RxRbufFullErr", 0, 0,
4551                         CNTR_NORMAL,
4552                         access_rx_rbuf_full_err_cnt),
4553 [C_RX_RBUF_BAD_LOOKUP_ERR] = CNTR_ELEM("RxRBufBadLookupErr", 0, 0,
4554                         CNTR_NORMAL,
4555                         access_rbuf_bad_lookup_err_cnt),
4556 [C_RX_RBUF_CTX_ID_PARITY_ERR] = CNTR_ELEM("RxRbufCtxIdParityErr", 0, 0,
4557                         CNTR_NORMAL,
4558                         access_rbuf_ctx_id_parity_err_cnt),
4559 [C_RX_RBUF_CSR_QEOPDW_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEOPDWParityErr", 0, 0,
4560                         CNTR_NORMAL,
4561                         access_rbuf_csr_qeopdw_parity_err_cnt),
4562 [C_RX_RBUF_CSR_Q_NUM_OF_PKT_PARITY_ERR] = CNTR_ELEM(
4563                         "RxRbufCsrQNumOfPktParityErr", 0, 0,
4564                         CNTR_NORMAL,
4565                         access_rx_rbuf_csr_q_num_of_pkt_parity_err_cnt),
4566 [C_RX_RBUF_CSR_Q_T1_PTR_PARITY_ERR] = CNTR_ELEM(
4567                         "RxRbufCsrQTlPtrParityErr", 0, 0,
4568                         CNTR_NORMAL,
4569                         access_rx_rbuf_csr_q_t1_ptr_parity_err_cnt),
4570 [C_RX_RBUF_CSR_Q_HD_PTR_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQHdPtrParityErr", 0,
4571                         0, CNTR_NORMAL,
4572                         access_rx_rbuf_csr_q_hd_ptr_parity_err_cnt),
4573 [C_RX_RBUF_CSR_Q_VLD_BIT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQVldBitParityErr", 0,
4574                         0, CNTR_NORMAL,
4575                         access_rx_rbuf_csr_q_vld_bit_parity_err_cnt),
4576 [C_RX_RBUF_CSR_Q_NEXT_BUF_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQNextBufParityErr",
4577                         0, 0, CNTR_NORMAL,
4578                         access_rx_rbuf_csr_q_next_buf_parity_err_cnt),
4579 [C_RX_RBUF_CSR_Q_ENT_CNT_PARITY_ERR] = CNTR_ELEM("RxRbufCsrQEntCntParityErr", 0,
4580                         0, CNTR_NORMAL,
4581                         access_rx_rbuf_csr_q_ent_cnt_parity_err_cnt),
4582 [C_RX_RBUF_CSR_Q_HEAD_BUF_NUM_PARITY_ERR] = CNTR_ELEM(
4583                         "RxRbufCsrQHeadBufNumParityErr", 0, 0,
4584                         CNTR_NORMAL,
4585                         access_rx_rbuf_csr_q_head_buf_num_parity_err_cnt),
4586 [C_RX_RBUF_BLOCK_LIST_READ_COR_ERR] = CNTR_ELEM("RxRbufBlockListReadCorErr", 0,
4587                         0, CNTR_NORMAL,
4588                         access_rx_rbuf_block_list_read_cor_err_cnt),
4589 [C_RX_RBUF_BLOCK_LIST_READ_UNC_ERR] = CNTR_ELEM("RxRbufBlockListReadUncErr", 0,
4590                         0, CNTR_NORMAL,
4591                         access_rx_rbuf_block_list_read_unc_err_cnt),
4592 [C_RX_RBUF_LOOKUP_DES_COR_ERR] = CNTR_ELEM("RxRbufLookupDesCorErr", 0, 0,
4593                         CNTR_NORMAL,
4594                         access_rx_rbuf_lookup_des_cor_err_cnt),
4595 [C_RX_RBUF_LOOKUP_DES_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesUncErr", 0, 0,
4596                         CNTR_NORMAL,
4597                         access_rx_rbuf_lookup_des_unc_err_cnt),
4598 [C_RX_RBUF_LOOKUP_DES_REG_UNC_COR_ERR] = CNTR_ELEM(
4599                         "RxRbufLookupDesRegUncCorErr", 0, 0,
4600                         CNTR_NORMAL,
4601                         access_rx_rbuf_lookup_des_reg_unc_cor_err_cnt),
4602 [C_RX_RBUF_LOOKUP_DES_REG_UNC_ERR] = CNTR_ELEM("RxRbufLookupDesRegUncErr", 0, 0,
4603                         CNTR_NORMAL,
4604                         access_rx_rbuf_lookup_des_reg_unc_err_cnt),
4605 [C_RX_RBUF_FREE_LIST_COR_ERR] = CNTR_ELEM("RxRbufFreeListCorErr", 0, 0,
4606                         CNTR_NORMAL,
4607                         access_rx_rbuf_free_list_cor_err_cnt),
4608 [C_RX_RBUF_FREE_LIST_UNC_ERR] = CNTR_ELEM("RxRbufFreeListUncErr", 0, 0,
4609                         CNTR_NORMAL,
4610                         access_rx_rbuf_free_list_unc_err_cnt),
4611 [C_RX_RCV_FSM_ENCODING_ERR] = CNTR_ELEM("RxRcvFsmEncodingErr", 0, 0,
4612                         CNTR_NORMAL,
4613                         access_rx_rcv_fsm_encoding_err_cnt),
4614 [C_RX_DMA_FLAG_COR_ERR] = CNTR_ELEM("RxDmaFlagCorErr", 0, 0,
4615                         CNTR_NORMAL,
4616                         access_rx_dma_flag_cor_err_cnt),
4617 [C_RX_DMA_FLAG_UNC_ERR] = CNTR_ELEM("RxDmaFlagUncErr", 0, 0,
4618                         CNTR_NORMAL,
4619                         access_rx_dma_flag_unc_err_cnt),
4620 [C_RX_DC_SOP_EOP_PARITY_ERR] = CNTR_ELEM("RxDcSopEopParityErr", 0, 0,
4621                         CNTR_NORMAL,
4622                         access_rx_dc_sop_eop_parity_err_cnt),
4623 [C_RX_RCV_CSR_PARITY_ERR] = CNTR_ELEM("RxRcvCsrParityErr", 0, 0,
4624                         CNTR_NORMAL,
4625                         access_rx_rcv_csr_parity_err_cnt),
4626 [C_RX_RCV_QP_MAP_TABLE_COR_ERR] = CNTR_ELEM("RxRcvQpMapTableCorErr", 0, 0,
4627                         CNTR_NORMAL,
4628                         access_rx_rcv_qp_map_table_cor_err_cnt),
4629 [C_RX_RCV_QP_MAP_TABLE_UNC_ERR] = CNTR_ELEM("RxRcvQpMapTableUncErr", 0, 0,
4630                         CNTR_NORMAL,
4631                         access_rx_rcv_qp_map_table_unc_err_cnt),
4632 [C_RX_RCV_DATA_COR_ERR] = CNTR_ELEM("RxRcvDataCorErr", 0, 0,
4633                         CNTR_NORMAL,
4634                         access_rx_rcv_data_cor_err_cnt),
4635 [C_RX_RCV_DATA_UNC_ERR] = CNTR_ELEM("RxRcvDataUncErr", 0, 0,
4636                         CNTR_NORMAL,
4637                         access_rx_rcv_data_unc_err_cnt),
4638 [C_RX_RCV_HDR_COR_ERR] = CNTR_ELEM("RxRcvHdrCorErr", 0, 0,
4639                         CNTR_NORMAL,
4640                         access_rx_rcv_hdr_cor_err_cnt),
4641 [C_RX_RCV_HDR_UNC_ERR] = CNTR_ELEM("RxRcvHdrUncErr", 0, 0,
4642                         CNTR_NORMAL,
4643                         access_rx_rcv_hdr_unc_err_cnt),
4644 [C_RX_DC_INTF_PARITY_ERR] = CNTR_ELEM("RxDcIntfParityErr", 0, 0,
4645                         CNTR_NORMAL,
4646                         access_rx_dc_intf_parity_err_cnt),
4647 [C_RX_DMA_CSR_COR_ERR] = CNTR_ELEM("RxDmaCsrCorErr", 0, 0,
4648                         CNTR_NORMAL,
4649                         access_rx_dma_csr_cor_err_cnt),
4650 /* SendPioErrStatus */
4651 [C_PIO_PEC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPecSopHeadParityErr", 0, 0,
4652                         CNTR_NORMAL,
4653                         access_pio_pec_sop_head_parity_err_cnt),
4654 [C_PIO_PCC_SOP_HEAD_PARITY_ERR] = CNTR_ELEM("PioPccSopHeadParityErr", 0, 0,
4655                         CNTR_NORMAL,
4656                         access_pio_pcc_sop_head_parity_err_cnt),
4657 [C_PIO_LAST_RETURNED_CNT_PARITY_ERR] = CNTR_ELEM("PioLastReturnedCntParityErr",
4658                         0, 0, CNTR_NORMAL,
4659                         access_pio_last_returned_cnt_parity_err_cnt),
4660 [C_PIO_CURRENT_FREE_CNT_PARITY_ERR] = CNTR_ELEM("PioCurrentFreeCntParityErr", 0,
4661                         0, CNTR_NORMAL,
4662                         access_pio_current_free_cnt_parity_err_cnt),
4663 [C_PIO_RSVD_31_ERR] = CNTR_ELEM("Pio Reserved 31", 0, 0,
4664                         CNTR_NORMAL,
4665                         access_pio_reserved_31_err_cnt),
4666 [C_PIO_RSVD_30_ERR] = CNTR_ELEM("Pio Reserved 30", 0, 0,
4667                         CNTR_NORMAL,
4668                         access_pio_reserved_30_err_cnt),
4669 [C_PIO_PPMC_SOP_LEN_ERR] = CNTR_ELEM("PioPpmcSopLenErr", 0, 0,
4670                         CNTR_NORMAL,
4671                         access_pio_ppmc_sop_len_err_cnt),
4672 [C_PIO_PPMC_BQC_MEM_PARITY_ERR] = CNTR_ELEM("PioPpmcBqcMemParityErr", 0, 0,
4673                         CNTR_NORMAL,
4674                         access_pio_ppmc_bqc_mem_parity_err_cnt),
4675 [C_PIO_VL_FIFO_PARITY_ERR] = CNTR_ELEM("PioVlFifoParityErr", 0, 0,
4676                         CNTR_NORMAL,
4677                         access_pio_vl_fifo_parity_err_cnt),
4678 [C_PIO_VLF_SOP_PARITY_ERR] = CNTR_ELEM("PioVlfSopParityErr", 0, 0,
4679                         CNTR_NORMAL,
4680                         access_pio_vlf_sop_parity_err_cnt),
4681 [C_PIO_VLF_V1_LEN_PARITY_ERR] = CNTR_ELEM("PioVlfVlLenParityErr", 0, 0,
4682                         CNTR_NORMAL,
4683                         access_pio_vlf_v1_len_parity_err_cnt),
4684 [C_PIO_BLOCK_QW_COUNT_PARITY_ERR] = CNTR_ELEM("PioBlockQwCountParityErr", 0, 0,
4685                         CNTR_NORMAL,
4686                         access_pio_block_qw_count_parity_err_cnt),
4687 [C_PIO_WRITE_QW_VALID_PARITY_ERR] = CNTR_ELEM("PioWriteQwValidParityErr", 0, 0,
4688                         CNTR_NORMAL,
4689                         access_pio_write_qw_valid_parity_err_cnt),
4690 [C_PIO_STATE_MACHINE_ERR] = CNTR_ELEM("PioStateMachineErr", 0, 0,
4691                         CNTR_NORMAL,
4692                         access_pio_state_machine_err_cnt),
4693 [C_PIO_WRITE_DATA_PARITY_ERR] = CNTR_ELEM("PioWriteDataParityErr", 0, 0,
4694                         CNTR_NORMAL,
4695                         access_pio_write_data_parity_err_cnt),
4696 [C_PIO_HOST_ADDR_MEM_COR_ERR] = CNTR_ELEM("PioHostAddrMemCorErr", 0, 0,
4697                         CNTR_NORMAL,
4698                         access_pio_host_addr_mem_cor_err_cnt),
4699 [C_PIO_HOST_ADDR_MEM_UNC_ERR] = CNTR_ELEM("PioHostAddrMemUncErr", 0, 0,
4700                         CNTR_NORMAL,
4701                         access_pio_host_addr_mem_unc_err_cnt),
4702 [C_PIO_PKT_EVICT_SM_OR_ARM_SM_ERR] = CNTR_ELEM("PioPktEvictSmOrArbSmErr", 0, 0,
4703                         CNTR_NORMAL,
4704                         access_pio_pkt_evict_sm_or_arb_sm_err_cnt),
4705 [C_PIO_INIT_SM_IN_ERR] = CNTR_ELEM("PioInitSmInErr", 0, 0,
4706                         CNTR_NORMAL,
4707                         access_pio_init_sm_in_err_cnt),
4708 [C_PIO_PPMC_PBL_FIFO_ERR] = CNTR_ELEM("PioPpmcPblFifoErr", 0, 0,
4709                         CNTR_NORMAL,
4710                         access_pio_ppmc_pbl_fifo_err_cnt),
4711 [C_PIO_CREDIT_RET_FIFO_PARITY_ERR] = CNTR_ELEM("PioCreditRetFifoParityErr", 0,
4712                         0, CNTR_NORMAL,
4713                         access_pio_credit_ret_fifo_parity_err_cnt),
4714 [C_PIO_V1_LEN_MEM_BANK1_COR_ERR] = CNTR_ELEM("PioVlLenMemBank1CorErr", 0, 0,
4715                         CNTR_NORMAL,
4716                         access_pio_v1_len_mem_bank1_cor_err_cnt),
4717 [C_PIO_V1_LEN_MEM_BANK0_COR_ERR] = CNTR_ELEM("PioVlLenMemBank0CorErr", 0, 0,
4718                         CNTR_NORMAL,
4719                         access_pio_v1_len_mem_bank0_cor_err_cnt),
4720 [C_PIO_V1_LEN_MEM_BANK1_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank1UncErr", 0, 0,
4721                         CNTR_NORMAL,
4722                         access_pio_v1_len_mem_bank1_unc_err_cnt),
4723 [C_PIO_V1_LEN_MEM_BANK0_UNC_ERR] = CNTR_ELEM("PioVlLenMemBank0UncErr", 0, 0,
4724                         CNTR_NORMAL,
4725                         access_pio_v1_len_mem_bank0_unc_err_cnt),
4726 [C_PIO_SM_PKT_RESET_PARITY_ERR] = CNTR_ELEM("PioSmPktResetParityErr", 0, 0,
4727                         CNTR_NORMAL,
4728                         access_pio_sm_pkt_reset_parity_err_cnt),
4729 [C_PIO_PKT_EVICT_FIFO_PARITY_ERR] = CNTR_ELEM("PioPktEvictFifoParityErr", 0, 0,
4730                         CNTR_NORMAL,
4731                         access_pio_pkt_evict_fifo_parity_err_cnt),
4732 [C_PIO_SBRDCTRL_CRREL_FIFO_PARITY_ERR] = CNTR_ELEM(
4733                         "PioSbrdctrlCrrelFifoParityErr", 0, 0,
4734                         CNTR_NORMAL,
4735                         access_pio_sbrdctrl_crrel_fifo_parity_err_cnt),
4736 [C_PIO_SBRDCTL_CRREL_PARITY_ERR] = CNTR_ELEM("PioSbrdctlCrrelParityErr", 0, 0,
4737                         CNTR_NORMAL,
4738                         access_pio_sbrdctl_crrel_parity_err_cnt),
4739 [C_PIO_PEC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPecFifoParityErr", 0, 0,
4740                         CNTR_NORMAL,
4741                         access_pio_pec_fifo_parity_err_cnt),
4742 [C_PIO_PCC_FIFO_PARITY_ERR] = CNTR_ELEM("PioPccFifoParityErr", 0, 0,
4743                         CNTR_NORMAL,
4744                         access_pio_pcc_fifo_parity_err_cnt),
4745 [C_PIO_SB_MEM_FIFO1_ERR] = CNTR_ELEM("PioSbMemFifo1Err", 0, 0,
4746                         CNTR_NORMAL,
4747                         access_pio_sb_mem_fifo1_err_cnt),
4748 [C_PIO_SB_MEM_FIFO0_ERR] = CNTR_ELEM("PioSbMemFifo0Err", 0, 0,
4749                         CNTR_NORMAL,
4750                         access_pio_sb_mem_fifo0_err_cnt),
4751 [C_PIO_CSR_PARITY_ERR] = CNTR_ELEM("PioCsrParityErr", 0, 0,
4752                         CNTR_NORMAL,
4753                         access_pio_csr_parity_err_cnt),
4754 [C_PIO_WRITE_ADDR_PARITY_ERR] = CNTR_ELEM("PioWriteAddrParityErr", 0, 0,
4755                         CNTR_NORMAL,
4756                         access_pio_write_addr_parity_err_cnt),
4757 [C_PIO_WRITE_BAD_CTXT_ERR] = CNTR_ELEM("PioWriteBadCtxtErr", 0, 0,
4758                         CNTR_NORMAL,
4759                         access_pio_write_bad_ctxt_err_cnt),
4760 /* SendDmaErrStatus */
4761 [C_SDMA_PCIE_REQ_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPcieReqTrackingCorErr", 0,
4762                         0, CNTR_NORMAL,
4763                         access_sdma_pcie_req_tracking_cor_err_cnt),
4764 [C_SDMA_PCIE_REQ_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPcieReqTrackingUncErr", 0,
4765                         0, CNTR_NORMAL,
4766                         access_sdma_pcie_req_tracking_unc_err_cnt),
4767 [C_SDMA_CSR_PARITY_ERR] = CNTR_ELEM("SDmaCsrParityErr", 0, 0,
4768                         CNTR_NORMAL,
4769                         access_sdma_csr_parity_err_cnt),
4770 [C_SDMA_RPY_TAG_ERR] = CNTR_ELEM("SDmaRpyTagErr", 0, 0,
4771                         CNTR_NORMAL,
4772                         access_sdma_rpy_tag_err_cnt),
4773 /* SendEgressErrStatus */
4774 [C_TX_READ_PIO_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryCsrUncErr", 0, 0,
4775                         CNTR_NORMAL,
4776                         access_tx_read_pio_memory_csr_unc_err_cnt),
4777 [C_TX_READ_SDMA_MEMORY_CSR_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryCsrUncErr", 0,
4778                         0, CNTR_NORMAL,
4779                         access_tx_read_sdma_memory_csr_err_cnt),
4780 [C_TX_EGRESS_FIFO_COR_ERR] = CNTR_ELEM("TxEgressFifoCorErr", 0, 0,
4781                         CNTR_NORMAL,
4782                         access_tx_egress_fifo_cor_err_cnt),
4783 [C_TX_READ_PIO_MEMORY_COR_ERR] = CNTR_ELEM("TxReadPioMemoryCorErr", 0, 0,
4784                         CNTR_NORMAL,
4785                         access_tx_read_pio_memory_cor_err_cnt),
4786 [C_TX_READ_SDMA_MEMORY_COR_ERR] = CNTR_ELEM("TxReadSdmaMemoryCorErr", 0, 0,
4787                         CNTR_NORMAL,
4788                         access_tx_read_sdma_memory_cor_err_cnt),
4789 [C_TX_SB_HDR_COR_ERR] = CNTR_ELEM("TxSbHdrCorErr", 0, 0,
4790                         CNTR_NORMAL,
4791                         access_tx_sb_hdr_cor_err_cnt),
4792 [C_TX_CREDIT_OVERRUN_ERR] = CNTR_ELEM("TxCreditOverrunErr", 0, 0,
4793                         CNTR_NORMAL,
4794                         access_tx_credit_overrun_err_cnt),
4795 [C_TX_LAUNCH_FIFO8_COR_ERR] = CNTR_ELEM("TxLaunchFifo8CorErr", 0, 0,
4796                         CNTR_NORMAL,
4797                         access_tx_launch_fifo8_cor_err_cnt),
4798 [C_TX_LAUNCH_FIFO7_COR_ERR] = CNTR_ELEM("TxLaunchFifo7CorErr", 0, 0,
4799                         CNTR_NORMAL,
4800                         access_tx_launch_fifo7_cor_err_cnt),
4801 [C_TX_LAUNCH_FIFO6_COR_ERR] = CNTR_ELEM("TxLaunchFifo6CorErr", 0, 0,
4802                         CNTR_NORMAL,
4803                         access_tx_launch_fifo6_cor_err_cnt),
4804 [C_TX_LAUNCH_FIFO5_COR_ERR] = CNTR_ELEM("TxLaunchFifo5CorErr", 0, 0,
4805                         CNTR_NORMAL,
4806                         access_tx_launch_fifo5_cor_err_cnt),
4807 [C_TX_LAUNCH_FIFO4_COR_ERR] = CNTR_ELEM("TxLaunchFifo4CorErr", 0, 0,
4808                         CNTR_NORMAL,
4809                         access_tx_launch_fifo4_cor_err_cnt),
4810 [C_TX_LAUNCH_FIFO3_COR_ERR] = CNTR_ELEM("TxLaunchFifo3CorErr", 0, 0,
4811                         CNTR_NORMAL,
4812                         access_tx_launch_fifo3_cor_err_cnt),
4813 [C_TX_LAUNCH_FIFO2_COR_ERR] = CNTR_ELEM("TxLaunchFifo2CorErr", 0, 0,
4814                         CNTR_NORMAL,
4815                         access_tx_launch_fifo2_cor_err_cnt),
4816 [C_TX_LAUNCH_FIFO1_COR_ERR] = CNTR_ELEM("TxLaunchFifo1CorErr", 0, 0,
4817                         CNTR_NORMAL,
4818                         access_tx_launch_fifo1_cor_err_cnt),
4819 [C_TX_LAUNCH_FIFO0_COR_ERR] = CNTR_ELEM("TxLaunchFifo0CorErr", 0, 0,
4820                         CNTR_NORMAL,
4821                         access_tx_launch_fifo0_cor_err_cnt),
4822 [C_TX_CREDIT_RETURN_VL_ERR] = CNTR_ELEM("TxCreditReturnVLErr", 0, 0,
4823                         CNTR_NORMAL,
4824                         access_tx_credit_return_vl_err_cnt),
4825 [C_TX_HCRC_INSERTION_ERR] = CNTR_ELEM("TxHcrcInsertionErr", 0, 0,
4826                         CNTR_NORMAL,
4827                         access_tx_hcrc_insertion_err_cnt),
4828 [C_TX_EGRESS_FIFI_UNC_ERR] = CNTR_ELEM("TxEgressFifoUncErr", 0, 0,
4829                         CNTR_NORMAL,
4830                         access_tx_egress_fifo_unc_err_cnt),
4831 [C_TX_READ_PIO_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadPioMemoryUncErr", 0, 0,
4832                         CNTR_NORMAL,
4833                         access_tx_read_pio_memory_unc_err_cnt),
4834 [C_TX_READ_SDMA_MEMORY_UNC_ERR] = CNTR_ELEM("TxReadSdmaMemoryUncErr", 0, 0,
4835                         CNTR_NORMAL,
4836                         access_tx_read_sdma_memory_unc_err_cnt),
4837 [C_TX_SB_HDR_UNC_ERR] = CNTR_ELEM("TxSbHdrUncErr", 0, 0,
4838                         CNTR_NORMAL,
4839                         access_tx_sb_hdr_unc_err_cnt),
4840 [C_TX_CREDIT_RETURN_PARITY_ERR] = CNTR_ELEM("TxCreditReturnParityErr", 0, 0,
4841                         CNTR_NORMAL,
4842                         access_tx_credit_return_partiy_err_cnt),
4843 [C_TX_LAUNCH_FIFO8_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo8UncOrParityErr",
4844                         0, 0, CNTR_NORMAL,
4845                         access_tx_launch_fifo8_unc_or_parity_err_cnt),
4846 [C_TX_LAUNCH_FIFO7_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo7UncOrParityErr",
4847                         0, 0, CNTR_NORMAL,
4848                         access_tx_launch_fifo7_unc_or_parity_err_cnt),
4849 [C_TX_LAUNCH_FIFO6_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo6UncOrParityErr",
4850                         0, 0, CNTR_NORMAL,
4851                         access_tx_launch_fifo6_unc_or_parity_err_cnt),
4852 [C_TX_LAUNCH_FIFO5_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo5UncOrParityErr",
4853                         0, 0, CNTR_NORMAL,
4854                         access_tx_launch_fifo5_unc_or_parity_err_cnt),
4855 [C_TX_LAUNCH_FIFO4_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo4UncOrParityErr",
4856                         0, 0, CNTR_NORMAL,
4857                         access_tx_launch_fifo4_unc_or_parity_err_cnt),
4858 [C_TX_LAUNCH_FIFO3_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo3UncOrParityErr",
4859                         0, 0, CNTR_NORMAL,
4860                         access_tx_launch_fifo3_unc_or_parity_err_cnt),
4861 [C_TX_LAUNCH_FIFO2_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo2UncOrParityErr",
4862                         0, 0, CNTR_NORMAL,
4863                         access_tx_launch_fifo2_unc_or_parity_err_cnt),
4864 [C_TX_LAUNCH_FIFO1_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo1UncOrParityErr",
4865                         0, 0, CNTR_NORMAL,
4866                         access_tx_launch_fifo1_unc_or_parity_err_cnt),
4867 [C_TX_LAUNCH_FIFO0_UNC_OR_PARITY_ERR] = CNTR_ELEM("TxLaunchFifo0UncOrParityErr",
4868                         0, 0, CNTR_NORMAL,
4869                         access_tx_launch_fifo0_unc_or_parity_err_cnt),
4870 [C_TX_SDMA15_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma15DisallowedPacketErr",
4871                         0, 0, CNTR_NORMAL,
4872                         access_tx_sdma15_disallowed_packet_err_cnt),
4873 [C_TX_SDMA14_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma14DisallowedPacketErr",
4874                         0, 0, CNTR_NORMAL,
4875                         access_tx_sdma14_disallowed_packet_err_cnt),
4876 [C_TX_SDMA13_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma13DisallowedPacketErr",
4877                         0, 0, CNTR_NORMAL,
4878                         access_tx_sdma13_disallowed_packet_err_cnt),
4879 [C_TX_SDMA12_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma12DisallowedPacketErr",
4880                         0, 0, CNTR_NORMAL,
4881                         access_tx_sdma12_disallowed_packet_err_cnt),
4882 [C_TX_SDMA11_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma11DisallowedPacketErr",
4883                         0, 0, CNTR_NORMAL,
4884                         access_tx_sdma11_disallowed_packet_err_cnt),
4885 [C_TX_SDMA10_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma10DisallowedPacketErr",
4886                         0, 0, CNTR_NORMAL,
4887                         access_tx_sdma10_disallowed_packet_err_cnt),
4888 [C_TX_SDMA9_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma9DisallowedPacketErr",
4889                         0, 0, CNTR_NORMAL,
4890                         access_tx_sdma9_disallowed_packet_err_cnt),
4891 [C_TX_SDMA8_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma8DisallowedPacketErr",
4892                         0, 0, CNTR_NORMAL,
4893                         access_tx_sdma8_disallowed_packet_err_cnt),
4894 [C_TX_SDMA7_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma7DisallowedPacketErr",
4895                         0, 0, CNTR_NORMAL,
4896                         access_tx_sdma7_disallowed_packet_err_cnt),
4897 [C_TX_SDMA6_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma6DisallowedPacketErr",
4898                         0, 0, CNTR_NORMAL,
4899                         access_tx_sdma6_disallowed_packet_err_cnt),
4900 [C_TX_SDMA5_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma5DisallowedPacketErr",
4901                         0, 0, CNTR_NORMAL,
4902                         access_tx_sdma5_disallowed_packet_err_cnt),
4903 [C_TX_SDMA4_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma4DisallowedPacketErr",
4904                         0, 0, CNTR_NORMAL,
4905                         access_tx_sdma4_disallowed_packet_err_cnt),
4906 [C_TX_SDMA3_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma3DisallowedPacketErr",
4907                         0, 0, CNTR_NORMAL,
4908                         access_tx_sdma3_disallowed_packet_err_cnt),
4909 [C_TX_SDMA2_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma2DisallowedPacketErr",
4910                         0, 0, CNTR_NORMAL,
4911                         access_tx_sdma2_disallowed_packet_err_cnt),
4912 [C_TX_SDMA1_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma1DisallowedPacketErr",
4913                         0, 0, CNTR_NORMAL,
4914                         access_tx_sdma1_disallowed_packet_err_cnt),
4915 [C_TX_SDMA0_DISALLOWED_PACKET_ERR] = CNTR_ELEM("TxSdma0DisallowedPacketErr",
4916                         0, 0, CNTR_NORMAL,
4917                         access_tx_sdma0_disallowed_packet_err_cnt),
4918 [C_TX_CONFIG_PARITY_ERR] = CNTR_ELEM("TxConfigParityErr", 0, 0,
4919                         CNTR_NORMAL,
4920                         access_tx_config_parity_err_cnt),
4921 [C_TX_SBRD_CTL_CSR_PARITY_ERR] = CNTR_ELEM("TxSbrdCtlCsrParityErr", 0, 0,
4922                         CNTR_NORMAL,
4923                         access_tx_sbrd_ctl_csr_parity_err_cnt),
4924 [C_TX_LAUNCH_CSR_PARITY_ERR] = CNTR_ELEM("TxLaunchCsrParityErr", 0, 0,
4925                         CNTR_NORMAL,
4926                         access_tx_launch_csr_parity_err_cnt),
4927 [C_TX_ILLEGAL_CL_ERR] = CNTR_ELEM("TxIllegalVLErr", 0, 0,
4928                         CNTR_NORMAL,
4929                         access_tx_illegal_vl_err_cnt),
4930 [C_TX_SBRD_CTL_STATE_MACHINE_PARITY_ERR] = CNTR_ELEM(
4931                         "TxSbrdCtlStateMachineParityErr", 0, 0,
4932                         CNTR_NORMAL,
4933                         access_tx_sbrd_ctl_state_machine_parity_err_cnt),
4934 [C_TX_RESERVED_10] = CNTR_ELEM("Tx Egress Reserved 10", 0, 0,
4935                         CNTR_NORMAL,
4936                         access_egress_reserved_10_err_cnt),
4937 [C_TX_RESERVED_9] = CNTR_ELEM("Tx Egress Reserved 9", 0, 0,
4938                         CNTR_NORMAL,
4939                         access_egress_reserved_9_err_cnt),
4940 [C_TX_SDMA_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxSdmaLaunchIntfParityErr",
4941                         0, 0, CNTR_NORMAL,
4942                         access_tx_sdma_launch_intf_parity_err_cnt),
4943 [C_TX_PIO_LAUNCH_INTF_PARITY_ERR] = CNTR_ELEM("TxPioLaunchIntfParityErr", 0, 0,
4944                         CNTR_NORMAL,
4945                         access_tx_pio_launch_intf_parity_err_cnt),
4946 [C_TX_RESERVED_6] = CNTR_ELEM("Tx Egress Reserved 6", 0, 0,
4947                         CNTR_NORMAL,
4948                         access_egress_reserved_6_err_cnt),
4949 [C_TX_INCORRECT_LINK_STATE_ERR] = CNTR_ELEM("TxIncorrectLinkStateErr", 0, 0,
4950                         CNTR_NORMAL,
4951                         access_tx_incorrect_link_state_err_cnt),
4952 [C_TX_LINK_DOWN_ERR] = CNTR_ELEM("TxLinkdownErr", 0, 0,
4953                         CNTR_NORMAL,
4954                         access_tx_linkdown_err_cnt),
4955 [C_TX_EGRESS_FIFO_UNDERRUN_OR_PARITY_ERR] = CNTR_ELEM(
4956                         "EgressFifoUnderrunOrParityErr", 0, 0,
4957                         CNTR_NORMAL,
4958                         access_tx_egress_fifi_underrun_or_parity_err_cnt),
4959 [C_TX_RESERVED_2] = CNTR_ELEM("Tx Egress Reserved 2", 0, 0,
4960                         CNTR_NORMAL,
4961                         access_egress_reserved_2_err_cnt),
4962 [C_TX_PKT_INTEGRITY_MEM_UNC_ERR] = CNTR_ELEM("TxPktIntegrityMemUncErr", 0, 0,
4963                         CNTR_NORMAL,
4964                         access_tx_pkt_integrity_mem_unc_err_cnt),
4965 [C_TX_PKT_INTEGRITY_MEM_COR_ERR] = CNTR_ELEM("TxPktIntegrityMemCorErr", 0, 0,
4966                         CNTR_NORMAL,
4967                         access_tx_pkt_integrity_mem_cor_err_cnt),
4968 /* SendErrStatus */
4969 [C_SEND_CSR_WRITE_BAD_ADDR_ERR] = CNTR_ELEM("SendCsrWriteBadAddrErr", 0, 0,
4970                         CNTR_NORMAL,
4971                         access_send_csr_write_bad_addr_err_cnt),
4972 [C_SEND_CSR_READ_BAD_ADD_ERR] = CNTR_ELEM("SendCsrReadBadAddrErr", 0, 0,
4973                         CNTR_NORMAL,
4974                         access_send_csr_read_bad_addr_err_cnt),
4975 [C_SEND_CSR_PARITY_ERR] = CNTR_ELEM("SendCsrParityErr", 0, 0,
4976                         CNTR_NORMAL,
4977                         access_send_csr_parity_cnt),
4978 /* SendCtxtErrStatus */
4979 [C_PIO_WRITE_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("PioWriteOutOfBoundsErr", 0, 0,
4980                         CNTR_NORMAL,
4981                         access_pio_write_out_of_bounds_err_cnt),
4982 [C_PIO_WRITE_OVERFLOW_ERR] = CNTR_ELEM("PioWriteOverflowErr", 0, 0,
4983                         CNTR_NORMAL,
4984                         access_pio_write_overflow_err_cnt),
4985 [C_PIO_WRITE_CROSSES_BOUNDARY_ERR] = CNTR_ELEM("PioWriteCrossesBoundaryErr",
4986                         0, 0, CNTR_NORMAL,
4987                         access_pio_write_crosses_boundary_err_cnt),
4988 [C_PIO_DISALLOWED_PACKET_ERR] = CNTR_ELEM("PioDisallowedPacketErr", 0, 0,
4989                         CNTR_NORMAL,
4990                         access_pio_disallowed_packet_err_cnt),
4991 [C_PIO_INCONSISTENT_SOP_ERR] = CNTR_ELEM("PioInconsistentSopErr", 0, 0,
4992                         CNTR_NORMAL,
4993                         access_pio_inconsistent_sop_err_cnt),
4994 /* SendDmaEngErrStatus */
4995 [C_SDMA_HEADER_REQUEST_FIFO_COR_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoCorErr",
4996                         0, 0, CNTR_NORMAL,
4997                         access_sdma_header_request_fifo_cor_err_cnt),
4998 [C_SDMA_HEADER_STORAGE_COR_ERR] = CNTR_ELEM("SDmaHeaderStorageCorErr", 0, 0,
4999                         CNTR_NORMAL,
5000                         access_sdma_header_storage_cor_err_cnt),
5001 [C_SDMA_PACKET_TRACKING_COR_ERR] = CNTR_ELEM("SDmaPacketTrackingCorErr", 0, 0,
5002                         CNTR_NORMAL,
5003                         access_sdma_packet_tracking_cor_err_cnt),
5004 [C_SDMA_ASSEMBLY_COR_ERR] = CNTR_ELEM("SDmaAssemblyCorErr", 0, 0,
5005                         CNTR_NORMAL,
5006                         access_sdma_assembly_cor_err_cnt),
5007 [C_SDMA_DESC_TABLE_COR_ERR] = CNTR_ELEM("SDmaDescTableCorErr", 0, 0,
5008                         CNTR_NORMAL,
5009                         access_sdma_desc_table_cor_err_cnt),
5010 [C_SDMA_HEADER_REQUEST_FIFO_UNC_ERR] = CNTR_ELEM("SDmaHeaderRequestFifoUncErr",
5011                         0, 0, CNTR_NORMAL,
5012                         access_sdma_header_request_fifo_unc_err_cnt),
5013 [C_SDMA_HEADER_STORAGE_UNC_ERR] = CNTR_ELEM("SDmaHeaderStorageUncErr", 0, 0,
5014                         CNTR_NORMAL,
5015                         access_sdma_header_storage_unc_err_cnt),
5016 [C_SDMA_PACKET_TRACKING_UNC_ERR] = CNTR_ELEM("SDmaPacketTrackingUncErr", 0, 0,
5017                         CNTR_NORMAL,
5018                         access_sdma_packet_tracking_unc_err_cnt),
5019 [C_SDMA_ASSEMBLY_UNC_ERR] = CNTR_ELEM("SDmaAssemblyUncErr", 0, 0,
5020                         CNTR_NORMAL,
5021                         access_sdma_assembly_unc_err_cnt),
5022 [C_SDMA_DESC_TABLE_UNC_ERR] = CNTR_ELEM("SDmaDescTableUncErr", 0, 0,
5023                         CNTR_NORMAL,
5024                         access_sdma_desc_table_unc_err_cnt),
5025 [C_SDMA_TIMEOUT_ERR] = CNTR_ELEM("SDmaTimeoutErr", 0, 0,
5026                         CNTR_NORMAL,
5027                         access_sdma_timeout_err_cnt),
5028 [C_SDMA_HEADER_LENGTH_ERR] = CNTR_ELEM("SDmaHeaderLengthErr", 0, 0,
5029                         CNTR_NORMAL,
5030                         access_sdma_header_length_err_cnt),
5031 [C_SDMA_HEADER_ADDRESS_ERR] = CNTR_ELEM("SDmaHeaderAddressErr", 0, 0,
5032                         CNTR_NORMAL,
5033                         access_sdma_header_address_err_cnt),
5034 [C_SDMA_HEADER_SELECT_ERR] = CNTR_ELEM("SDmaHeaderSelectErr", 0, 0,
5035                         CNTR_NORMAL,
5036                         access_sdma_header_select_err_cnt),
5037 [C_SMDA_RESERVED_9] = CNTR_ELEM("SDma Reserved 9", 0, 0,
5038                         CNTR_NORMAL,
5039                         access_sdma_reserved_9_err_cnt),
5040 [C_SDMA_PACKET_DESC_OVERFLOW_ERR] = CNTR_ELEM("SDmaPacketDescOverflowErr", 0, 0,
5041                         CNTR_NORMAL,
5042                         access_sdma_packet_desc_overflow_err_cnt),
5043 [C_SDMA_LENGTH_MISMATCH_ERR] = CNTR_ELEM("SDmaLengthMismatchErr", 0, 0,
5044                         CNTR_NORMAL,
5045                         access_sdma_length_mismatch_err_cnt),
5046 [C_SDMA_HALT_ERR] = CNTR_ELEM("SDmaHaltErr", 0, 0,
5047                         CNTR_NORMAL,
5048                         access_sdma_halt_err_cnt),
5049 [C_SDMA_MEM_READ_ERR] = CNTR_ELEM("SDmaMemReadErr", 0, 0,
5050                         CNTR_NORMAL,
5051                         access_sdma_mem_read_err_cnt),
5052 [C_SDMA_FIRST_DESC_ERR] = CNTR_ELEM("SDmaFirstDescErr", 0, 0,
5053                         CNTR_NORMAL,
5054                         access_sdma_first_desc_err_cnt),
5055 [C_SDMA_TAIL_OUT_OF_BOUNDS_ERR] = CNTR_ELEM("SDmaTailOutOfBoundsErr", 0, 0,
5056                         CNTR_NORMAL,
5057                         access_sdma_tail_out_of_bounds_err_cnt),
5058 [C_SDMA_TOO_LONG_ERR] = CNTR_ELEM("SDmaTooLongErr", 0, 0,
5059                         CNTR_NORMAL,
5060                         access_sdma_too_long_err_cnt),
5061 [C_SDMA_GEN_MISMATCH_ERR] = CNTR_ELEM("SDmaGenMismatchErr", 0, 0,
5062                         CNTR_NORMAL,
5063                         access_sdma_gen_mismatch_err_cnt),
5064 [C_SDMA_WRONG_DW_ERR] = CNTR_ELEM("SDmaWrongDwErr", 0, 0,
5065                         CNTR_NORMAL,
5066                         access_sdma_wrong_dw_err_cnt),
5067 };
5068
5069 static struct cntr_entry port_cntrs[PORT_CNTR_LAST] = {
5070 [C_TX_UNSUP_VL] = TXE32_PORT_CNTR_ELEM(TxUnVLErr, SEND_UNSUP_VL_ERR_CNT,
5071                         CNTR_NORMAL),
5072 [C_TX_INVAL_LEN] = TXE32_PORT_CNTR_ELEM(TxInvalLen, SEND_LEN_ERR_CNT,
5073                         CNTR_NORMAL),
5074 [C_TX_MM_LEN_ERR] = TXE32_PORT_CNTR_ELEM(TxMMLenErr, SEND_MAX_MIN_LEN_ERR_CNT,
5075                         CNTR_NORMAL),
5076 [C_TX_UNDERRUN] = TXE32_PORT_CNTR_ELEM(TxUnderrun, SEND_UNDERRUN_CNT,
5077                         CNTR_NORMAL),
5078 [C_TX_FLOW_STALL] = TXE32_PORT_CNTR_ELEM(TxFlowStall, SEND_FLOW_STALL_CNT,
5079                         CNTR_NORMAL),
5080 [C_TX_DROPPED] = TXE32_PORT_CNTR_ELEM(TxDropped, SEND_DROPPED_PKT_CNT,
5081                         CNTR_NORMAL),
5082 [C_TX_HDR_ERR] = TXE32_PORT_CNTR_ELEM(TxHdrErr, SEND_HEADERS_ERR_CNT,
5083                         CNTR_NORMAL),
5084 [C_TX_PKT] = TXE64_PORT_CNTR_ELEM(TxPkt, SEND_DATA_PKT_CNT, CNTR_NORMAL),
5085 [C_TX_WORDS] = TXE64_PORT_CNTR_ELEM(TxWords, SEND_DWORD_CNT, CNTR_NORMAL),
5086 [C_TX_WAIT] = TXE64_PORT_CNTR_ELEM(TxWait, SEND_WAIT_CNT, CNTR_SYNTH),
5087 [C_TX_FLIT_VL] = TXE64_PORT_CNTR_ELEM(TxFlitVL, SEND_DATA_VL0_CNT,
5088                                       CNTR_SYNTH | CNTR_VL),
5089 [C_TX_PKT_VL] = TXE64_PORT_CNTR_ELEM(TxPktVL, SEND_DATA_PKT_VL0_CNT,
5090                                      CNTR_SYNTH | CNTR_VL),
5091 [C_TX_WAIT_VL] = TXE64_PORT_CNTR_ELEM(TxWaitVL, SEND_WAIT_VL0_CNT,
5092                                       CNTR_SYNTH | CNTR_VL),
5093 [C_RX_PKT] = RXE64_PORT_CNTR_ELEM(RxPkt, RCV_DATA_PKT_CNT, CNTR_NORMAL),
5094 [C_RX_WORDS] = RXE64_PORT_CNTR_ELEM(RxWords, RCV_DWORD_CNT, CNTR_NORMAL),
5095 [C_SW_LINK_DOWN] = CNTR_ELEM("SwLinkDown", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5096                              access_sw_link_dn_cnt),
5097 [C_SW_LINK_UP] = CNTR_ELEM("SwLinkUp", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5098                            access_sw_link_up_cnt),
5099 [C_SW_UNKNOWN_FRAME] = CNTR_ELEM("UnknownFrame", 0, 0, CNTR_NORMAL,
5100                                  access_sw_unknown_frame_cnt),
5101 [C_SW_XMIT_DSCD] = CNTR_ELEM("XmitDscd", 0, 0, CNTR_SYNTH | CNTR_32BIT,
5102                              access_sw_xmit_discards),
5103 [C_SW_XMIT_DSCD_VL] = CNTR_ELEM("XmitDscdVl", 0, 0,
5104                                 CNTR_SYNTH | CNTR_32BIT | CNTR_VL,
5105                                 access_sw_xmit_discards),
5106 [C_SW_XMIT_CSTR_ERR] = CNTR_ELEM("XmitCstrErr", 0, 0, CNTR_SYNTH,
5107                                  access_xmit_constraint_errs),
5108 [C_SW_RCV_CSTR_ERR] = CNTR_ELEM("RcvCstrErr", 0, 0, CNTR_SYNTH,
5109                                 access_rcv_constraint_errs),
5110 [C_SW_IBP_LOOP_PKTS] = SW_IBP_CNTR(LoopPkts, loop_pkts),
5111 [C_SW_IBP_RC_RESENDS] = SW_IBP_CNTR(RcResend, rc_resends),
5112 [C_SW_IBP_RNR_NAKS] = SW_IBP_CNTR(RnrNak, rnr_naks),
5113 [C_SW_IBP_OTHER_NAKS] = SW_IBP_CNTR(OtherNak, other_naks),
5114 [C_SW_IBP_RC_TIMEOUTS] = SW_IBP_CNTR(RcTimeOut, rc_timeouts),
5115 [C_SW_IBP_PKT_DROPS] = SW_IBP_CNTR(PktDrop, pkt_drops),
5116 [C_SW_IBP_DMA_WAIT] = SW_IBP_CNTR(DmaWait, dmawait),
5117 [C_SW_IBP_RC_SEQNAK] = SW_IBP_CNTR(RcSeqNak, rc_seqnak),
5118 [C_SW_IBP_RC_DUPREQ] = SW_IBP_CNTR(RcDupRew, rc_dupreq),
5119 [C_SW_IBP_RDMA_SEQ] = SW_IBP_CNTR(RdmaSeq, rdma_seq),
5120 [C_SW_IBP_UNALIGNED] = SW_IBP_CNTR(Unaligned, unaligned),
5121 [C_SW_IBP_SEQ_NAK] = SW_IBP_CNTR(SeqNak, seq_naks),
5122 [C_SW_CPU_RC_ACKS] = CNTR_ELEM("RcAcks", 0, 0, CNTR_NORMAL,
5123                                access_sw_cpu_rc_acks),
5124 [C_SW_CPU_RC_QACKS] = CNTR_ELEM("RcQacks", 0, 0, CNTR_NORMAL,
5125                                 access_sw_cpu_rc_qacks),
5126 [C_SW_CPU_RC_DELAYED_COMP] = CNTR_ELEM("RcDelayComp", 0, 0, CNTR_NORMAL,
5127                                        access_sw_cpu_rc_delayed_comp),
5128 [OVR_LBL(0)] = OVR_ELM(0), [OVR_LBL(1)] = OVR_ELM(1),
5129 [OVR_LBL(2)] = OVR_ELM(2), [OVR_LBL(3)] = OVR_ELM(3),
5130 [OVR_LBL(4)] = OVR_ELM(4), [OVR_LBL(5)] = OVR_ELM(5),
5131 [OVR_LBL(6)] = OVR_ELM(6), [OVR_LBL(7)] = OVR_ELM(7),
5132 [OVR_LBL(8)] = OVR_ELM(8), [OVR_LBL(9)] = OVR_ELM(9),
5133 [OVR_LBL(10)] = OVR_ELM(10), [OVR_LBL(11)] = OVR_ELM(11),
5134 [OVR_LBL(12)] = OVR_ELM(12), [OVR_LBL(13)] = OVR_ELM(13),
5135 [OVR_LBL(14)] = OVR_ELM(14), [OVR_LBL(15)] = OVR_ELM(15),
5136 [OVR_LBL(16)] = OVR_ELM(16), [OVR_LBL(17)] = OVR_ELM(17),
5137 [OVR_LBL(18)] = OVR_ELM(18), [OVR_LBL(19)] = OVR_ELM(19),
5138 [OVR_LBL(20)] = OVR_ELM(20), [OVR_LBL(21)] = OVR_ELM(21),
5139 [OVR_LBL(22)] = OVR_ELM(22), [OVR_LBL(23)] = OVR_ELM(23),
5140 [OVR_LBL(24)] = OVR_ELM(24), [OVR_LBL(25)] = OVR_ELM(25),
5141 [OVR_LBL(26)] = OVR_ELM(26), [OVR_LBL(27)] = OVR_ELM(27),
5142 [OVR_LBL(28)] = OVR_ELM(28), [OVR_LBL(29)] = OVR_ELM(29),
5143 [OVR_LBL(30)] = OVR_ELM(30), [OVR_LBL(31)] = OVR_ELM(31),
5144 [OVR_LBL(32)] = OVR_ELM(32), [OVR_LBL(33)] = OVR_ELM(33),
5145 [OVR_LBL(34)] = OVR_ELM(34), [OVR_LBL(35)] = OVR_ELM(35),
5146 [OVR_LBL(36)] = OVR_ELM(36), [OVR_LBL(37)] = OVR_ELM(37),
5147 [OVR_LBL(38)] = OVR_ELM(38), [OVR_LBL(39)] = OVR_ELM(39),
5148 [OVR_LBL(40)] = OVR_ELM(40), [OVR_LBL(41)] = OVR_ELM(41),
5149 [OVR_LBL(42)] = OVR_ELM(42), [OVR_LBL(43)] = OVR_ELM(43),
5150 [OVR_LBL(44)] = OVR_ELM(44), [OVR_LBL(45)] = OVR_ELM(45),
5151 [OVR_LBL(46)] = OVR_ELM(46), [OVR_LBL(47)] = OVR_ELM(47),
5152 [OVR_LBL(48)] = OVR_ELM(48), [OVR_LBL(49)] = OVR_ELM(49),
5153 [OVR_LBL(50)] = OVR_ELM(50), [OVR_LBL(51)] = OVR_ELM(51),
5154 [OVR_LBL(52)] = OVR_ELM(52), [OVR_LBL(53)] = OVR_ELM(53),
5155 [OVR_LBL(54)] = OVR_ELM(54), [OVR_LBL(55)] = OVR_ELM(55),
5156 [OVR_LBL(56)] = OVR_ELM(56), [OVR_LBL(57)] = OVR_ELM(57),
5157 [OVR_LBL(58)] = OVR_ELM(58), [OVR_LBL(59)] = OVR_ELM(59),
5158 [OVR_LBL(60)] = OVR_ELM(60), [OVR_LBL(61)] = OVR_ELM(61),
5159 [OVR_LBL(62)] = OVR_ELM(62), [OVR_LBL(63)] = OVR_ELM(63),
5160 [OVR_LBL(64)] = OVR_ELM(64), [OVR_LBL(65)] = OVR_ELM(65),
5161 [OVR_LBL(66)] = OVR_ELM(66), [OVR_LBL(67)] = OVR_ELM(67),
5162 [OVR_LBL(68)] = OVR_ELM(68), [OVR_LBL(69)] = OVR_ELM(69),
5163 [OVR_LBL(70)] = OVR_ELM(70), [OVR_LBL(71)] = OVR_ELM(71),
5164 [OVR_LBL(72)] = OVR_ELM(72), [OVR_LBL(73)] = OVR_ELM(73),
5165 [OVR_LBL(74)] = OVR_ELM(74), [OVR_LBL(75)] = OVR_ELM(75),
5166 [OVR_LBL(76)] = OVR_ELM(76), [OVR_LBL(77)] = OVR_ELM(77),
5167 [OVR_LBL(78)] = OVR_ELM(78), [OVR_LBL(79)] = OVR_ELM(79),
5168 [OVR_LBL(80)] = OVR_ELM(80), [OVR_LBL(81)] = OVR_ELM(81),
5169 [OVR_LBL(82)] = OVR_ELM(82), [OVR_LBL(83)] = OVR_ELM(83),
5170 [OVR_LBL(84)] = OVR_ELM(84), [OVR_LBL(85)] = OVR_ELM(85),
5171 [OVR_LBL(86)] = OVR_ELM(86), [OVR_LBL(87)] = OVR_ELM(87),
5172 [OVR_LBL(88)] = OVR_ELM(88), [OVR_LBL(89)] = OVR_ELM(89),
5173 [OVR_LBL(90)] = OVR_ELM(90), [OVR_LBL(91)] = OVR_ELM(91),
5174 [OVR_LBL(92)] = OVR_ELM(92), [OVR_LBL(93)] = OVR_ELM(93),
5175 [OVR_LBL(94)] = OVR_ELM(94), [OVR_LBL(95)] = OVR_ELM(95),
5176 [OVR_LBL(96)] = OVR_ELM(96), [OVR_LBL(97)] = OVR_ELM(97),
5177 [OVR_LBL(98)] = OVR_ELM(98), [OVR_LBL(99)] = OVR_ELM(99),
5178 [OVR_LBL(100)] = OVR_ELM(100), [OVR_LBL(101)] = OVR_ELM(101),
5179 [OVR_LBL(102)] = OVR_ELM(102), [OVR_LBL(103)] = OVR_ELM(103),
5180 [OVR_LBL(104)] = OVR_ELM(104), [OVR_LBL(105)] = OVR_ELM(105),
5181 [OVR_LBL(106)] = OVR_ELM(106), [OVR_LBL(107)] = OVR_ELM(107),
5182 [OVR_LBL(108)] = OVR_ELM(108), [OVR_LBL(109)] = OVR_ELM(109),
5183 [OVR_LBL(110)] = OVR_ELM(110), [OVR_LBL(111)] = OVR_ELM(111),
5184 [OVR_LBL(112)] = OVR_ELM(112), [OVR_LBL(113)] = OVR_ELM(113),
5185 [OVR_LBL(114)] = OVR_ELM(114), [OVR_LBL(115)] = OVR_ELM(115),
5186 [OVR_LBL(116)] = OVR_ELM(116), [OVR_LBL(117)] = OVR_ELM(117),
5187 [OVR_LBL(118)] = OVR_ELM(118), [OVR_LBL(119)] = OVR_ELM(119),
5188 [OVR_LBL(120)] = OVR_ELM(120), [OVR_LBL(121)] = OVR_ELM(121),
5189 [OVR_LBL(122)] = OVR_ELM(122), [OVR_LBL(123)] = OVR_ELM(123),
5190 [OVR_LBL(124)] = OVR_ELM(124), [OVR_LBL(125)] = OVR_ELM(125),
5191 [OVR_LBL(126)] = OVR_ELM(126), [OVR_LBL(127)] = OVR_ELM(127),
5192 [OVR_LBL(128)] = OVR_ELM(128), [OVR_LBL(129)] = OVR_ELM(129),
5193 [OVR_LBL(130)] = OVR_ELM(130), [OVR_LBL(131)] = OVR_ELM(131),
5194 [OVR_LBL(132)] = OVR_ELM(132), [OVR_LBL(133)] = OVR_ELM(133),
5195 [OVR_LBL(134)] = OVR_ELM(134), [OVR_LBL(135)] = OVR_ELM(135),
5196 [OVR_LBL(136)] = OVR_ELM(136), [OVR_LBL(137)] = OVR_ELM(137),
5197 [OVR_LBL(138)] = OVR_ELM(138), [OVR_LBL(139)] = OVR_ELM(139),
5198 [OVR_LBL(140)] = OVR_ELM(140), [OVR_LBL(141)] = OVR_ELM(141),
5199 [OVR_LBL(142)] = OVR_ELM(142), [OVR_LBL(143)] = OVR_ELM(143),
5200 [OVR_LBL(144)] = OVR_ELM(144), [OVR_LBL(145)] = OVR_ELM(145),
5201 [OVR_LBL(146)] = OVR_ELM(146), [OVR_LBL(147)] = OVR_ELM(147),
5202 [OVR_LBL(148)] = OVR_ELM(148), [OVR_LBL(149)] = OVR_ELM(149),
5203 [OVR_LBL(150)] = OVR_ELM(150), [OVR_LBL(151)] = OVR_ELM(151),
5204 [OVR_LBL(152)] = OVR_ELM(152), [OVR_LBL(153)] = OVR_ELM(153),
5205 [OVR_LBL(154)] = OVR_ELM(154), [OVR_LBL(155)] = OVR_ELM(155),
5206 [OVR_LBL(156)] = OVR_ELM(156), [OVR_LBL(157)] = OVR_ELM(157),
5207 [OVR_LBL(158)] = OVR_ELM(158), [OVR_LBL(159)] = OVR_ELM(159),
5208 };
5209
5210 /* ======================================================================== */
5211
5212 /* return true if this is chip revision A */
5213 int is_ax(struct hfi1_devdata *dd)
5214 {
5215         u8 chip_rev_minor =
5216                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5217                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5218         return (chip_rev_minor & 0xf0) == 0;
5219 }
5220
5221 /* return true if this is chip revision B */
5222 int is_bx(struct hfi1_devdata *dd)
5223 {
5224         u8 chip_rev_minor =
5225                 dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT
5226                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
5227         return (chip_rev_minor & 0xF0) == 0x10;
5228 }
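/*
 * Illustrative decode (values chosen only to show the checks above, not
 * taken from a hardware spec): a CCE_REVISION minor field of 0x01 makes
 * is_ax() return true (A-step silicon), while 0x10 makes is_bx() return
 * true (B-step silicon).
 */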
5229
5230 /* return true if the kernel receive urgent interrupt is masked for rcd */
5231 bool is_urg_masked(struct hfi1_ctxtdata *rcd)
5232 {
5233         u64 mask;
5234         u32 is = IS_RCVURGENT_START + rcd->ctxt;
5235         u8 bit = is % 64;
5236
5237         mask = read_csr(rcd->dd, CCE_INT_MASK + (8 * (is / 64)));
5238         return !(mask & BIT_ULL(bit));
5239 }
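/*
 * Worked example of the index math above (numbers are illustrative only):
 * with IS_RCVURGENT_START + rcd->ctxt == 70, the mask lives in the second
 * 64-bit CCE_INT_MASK register (70 / 64 == 1, CSR offset 8) and the
 * interrupt is bit 6 of that register (70 % 64 == 6).
 */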
5240
5241 /*
5242  * Append string s to buffer buf.  Arguments curp and len are the current
5243  * position and remaining length, respectively.
5244  *
5245  * return 0 on success, 1 on out of room
5246  */
5247 static int append_str(char *buf, char **curp, int *lenp, const char *s)
5248 {
5249         char *p = *curp;
5250         int len = *lenp;
5251         int result = 0; /* success */
5252         char c;
5253
5254                 /* add a comma if this is not the first entry in the buffer */
5255         if (p != buf) {
5256                 if (len == 0) {
5257                         result = 1; /* out of room */
5258                         goto done;
5259                 }
5260                 *p++ = ',';
5261                 len--;
5262         }
5263
5264         /* copy the string */
5265         while ((c = *s++) != 0) {
5266                 if (len == 0) {
5267                         result = 1; /* out of room */
5268                         goto done;
5269                 }
5270                 *p++ = c;
5271                 len--;
5272         }
5273
5274 done:
5275         /* write return values */
5276         *curp = p;
5277         *lenp = len;
5278
5279         return result;
5280 }
5281
5282 /*
5283  * Using the given flag table, print a comma separated string into
5284  * the buffer.  End in '*' if the buffer is too short.
5285  */
5286 static char *flag_string(char *buf, int buf_len, u64 flags,
5287                          struct flag_table *table, int table_size)
5288 {
5289         char extra[32];
5290         char *p = buf;
5291         int len = buf_len;
5292         int no_room = 0;
5293         int i;
5294
5295         /* make sure there are at least 2 bytes so we can form the string "*" */
5296         if (len < 2)
5297                 return "";
5298
5299         len--;  /* leave room for a nul */
5300         for (i = 0; i < table_size; i++) {
5301                 if (flags & table[i].flag) {
5302                         no_room = append_str(buf, &p, &len, table[i].str);
5303                         if (no_room)
5304                                 break;
5305                         flags &= ~table[i].flag;
5306                 }
5307         }
5308
5309         /* any undocumented bits left? */
5310         if (!no_room && flags) {
5311                 snprintf(extra, sizeof(extra), "bits 0x%llx", flags);
5312                 no_room = append_str(buf, &p, &len, extra);
5313         }
5314
5315         /* add '*' if we ran out of room */
5316         if (no_room) {
5317                 /* may need to back up to add space for a '*' */
5318                 if (len == 0)
5319                         --p;
5320                 *p++ = '*';
5321         }
5322
5323         /* add final nul - space already allocated above */
5324         *p = 0;
5325         return buf;
5326 }
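/*
 * Example of the resulting format (hypothetical table and flag values):
 * with a table mapping bit 0 to "ErrA" and bit 1 to "ErrB", flags == 0x7
 * would print "ErrA,ErrB,bits 0x4", the last entry covering the
 * undocumented bit 2.  A '*' is appended instead if the buffer fills up
 * before everything is printed.
 */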
5327
5328 /* first 8 CCE error interrupt source names */
5329 static const char * const cce_misc_names[] = {
5330         "CceErrInt",            /* 0 */
5331         "RxeErrInt",            /* 1 */
5332         "MiscErrInt",           /* 2 */
5333         "Reserved3",            /* 3 */
5334         "PioErrInt",            /* 4 */
5335         "SDmaErrInt",           /* 5 */
5336         "EgressErrInt",         /* 6 */
5337         "TxeErrInt"             /* 7 */
5338 };
5339
5340 /*
5341  * Return the miscellaneous error interrupt name.
5342  */
5343 static char *is_misc_err_name(char *buf, size_t bsize, unsigned int source)
5344 {
5345         if (source < ARRAY_SIZE(cce_misc_names))
5346                 strncpy(buf, cce_misc_names[source], bsize);
5347         else
5348                 snprintf(buf, bsize, "Reserved%u",
5349                          source + IS_GENERAL_ERR_START);
5350
5351         return buf;
5352 }
5353
5354 /*
5355  * Return the SDMA engine error interrupt name.
5356  */
5357 static char *is_sdma_eng_err_name(char *buf, size_t bsize, unsigned int source)
5358 {
5359         snprintf(buf, bsize, "SDmaEngErrInt%u", source);
5360         return buf;
5361 }
5362
5363 /*
5364  * Return the send context error interrupt name.
5365  */
5366 static char *is_sendctxt_err_name(char *buf, size_t bsize, unsigned int source)
5367 {
5368         snprintf(buf, bsize, "SendCtxtErrInt%u", source);
5369         return buf;
5370 }
5371
5372 static const char * const various_names[] = {
5373         "PbcInt",
5374         "GpioAssertInt",
5375         "Qsfp1Int",
5376         "Qsfp2Int",
5377         "TCritInt"
5378 };
5379
5380 /*
5381  * Return the various interrupt name.
5382  */
5383 static char *is_various_name(char *buf, size_t bsize, unsigned int source)
5384 {
5385         if (source < ARRAY_SIZE(various_names))
5386                 strncpy(buf, various_names[source], bsize);
5387         else
5388                 snprintf(buf, bsize, "Reserved%u", source + IS_VARIOUS_START);
5389         return buf;
5390 }
5391
5392 /*
5393  * Return the DC interrupt name.
5394  */
5395 static char *is_dc_name(char *buf, size_t bsize, unsigned int source)
5396 {
5397         static const char * const dc_int_names[] = {
5398                 "common",
5399                 "lcb",
5400                 "8051",
5401                 "lbm"   /* local block merge */
5402         };
5403
5404         if (source < ARRAY_SIZE(dc_int_names))
5405                 snprintf(buf, bsize, "dc_%s_int", dc_int_names[source]);
5406         else
5407                 snprintf(buf, bsize, "DCInt%u", source);
5408         return buf;
5409 }
5410
5411 static const char * const sdma_int_names[] = {
5412         "SDmaInt",
5413         "SdmaIdleInt",
5414         "SdmaProgressInt",
5415 };
5416
5417 /*
5418  * Return the SDMA engine interrupt name.
5419  */
5420 static char *is_sdma_eng_name(char *buf, size_t bsize, unsigned int source)
5421 {
5422         /* what interrupt */
5423         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
5424         /* which engine */
5425         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
5426
5427         if (likely(what < 3))
5428                 snprintf(buf, bsize, "%s%u", sdma_int_names[what], which);
5429         else
5430                 snprintf(buf, bsize, "Invalid SDMA interrupt %u", source);
5431         return buf;
5432 }
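/*
 * Worked example (assuming TXE_NUM_SDMA_ENGINES == 16): source 37 gives
 * what == 37 / 16 == 2 and which == 37 % 16 == 5, so the name printed is
 * "SdmaProgressInt5".
 */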
5433
5434 /*
5435  * Return the receive available interrupt name.
5436  */
5437 static char *is_rcv_avail_name(char *buf, size_t bsize, unsigned int source)
5438 {
5439         snprintf(buf, bsize, "RcvAvailInt%u", source);
5440         return buf;
5441 }
5442
5443 /*
5444  * Return the receive urgent interrupt name.
5445  */
5446 static char *is_rcv_urgent_name(char *buf, size_t bsize, unsigned int source)
5447 {
5448         snprintf(buf, bsize, "RcvUrgentInt%u", source);
5449         return buf;
5450 }
5451
5452 /*
5453  * Return the send credit interrupt name.
5454  */
5455 static char *is_send_credit_name(char *buf, size_t bsize, unsigned int source)
5456 {
5457         snprintf(buf, bsize, "SendCreditInt%u", source);
5458         return buf;
5459 }
5460
5461 /*
5462  * Return the reserved interrupt name.
5463  */
5464 static char *is_reserved_name(char *buf, size_t bsize, unsigned int source)
5465 {
5466         snprintf(buf, bsize, "Reserved%u", source + IS_RESERVED_START);
5467         return buf;
5468 }
5469
5470 static char *cce_err_status_string(char *buf, int buf_len, u64 flags)
5471 {
5472         return flag_string(buf, buf_len, flags,
5473                            cce_err_status_flags,
5474                            ARRAY_SIZE(cce_err_status_flags));
5475 }
5476
5477 static char *rxe_err_status_string(char *buf, int buf_len, u64 flags)
5478 {
5479         return flag_string(buf, buf_len, flags,
5480                            rxe_err_status_flags,
5481                            ARRAY_SIZE(rxe_err_status_flags));
5482 }
5483
5484 static char *misc_err_status_string(char *buf, int buf_len, u64 flags)
5485 {
5486         return flag_string(buf, buf_len, flags, misc_err_status_flags,
5487                            ARRAY_SIZE(misc_err_status_flags));
5488 }
5489
5490 static char *pio_err_status_string(char *buf, int buf_len, u64 flags)
5491 {
5492         return flag_string(buf, buf_len, flags,
5493                            pio_err_status_flags,
5494                            ARRAY_SIZE(pio_err_status_flags));
5495 }
5496
5497 static char *sdma_err_status_string(char *buf, int buf_len, u64 flags)
5498 {
5499         return flag_string(buf, buf_len, flags,
5500                            sdma_err_status_flags,
5501                            ARRAY_SIZE(sdma_err_status_flags));
5502 }
5503
5504 static char *egress_err_status_string(char *buf, int buf_len, u64 flags)
5505 {
5506         return flag_string(buf, buf_len, flags,
5507                            egress_err_status_flags,
5508                            ARRAY_SIZE(egress_err_status_flags));
5509 }
5510
5511 static char *egress_err_info_string(char *buf, int buf_len, u64 flags)
5512 {
5513         return flag_string(buf, buf_len, flags,
5514                            egress_err_info_flags,
5515                            ARRAY_SIZE(egress_err_info_flags));
5516 }
5517
5518 static char *send_err_status_string(char *buf, int buf_len, u64 flags)
5519 {
5520         return flag_string(buf, buf_len, flags,
5521                            send_err_status_flags,
5522                            ARRAY_SIZE(send_err_status_flags));
5523 }
5524
5525 static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5526 {
5527         char buf[96];
5528         int i = 0;
5529
5530         /*
5531          * For most these errors, there is nothing that can be done except
5532          * report or record it.
5533          */
5534         dd_dev_info(dd, "CCE Error: %s\n",
5535                     cce_err_status_string(buf, sizeof(buf), reg));
5536
5537         if ((reg & CCE_ERR_STATUS_CCE_CLI2_ASYNC_FIFO_PARITY_ERR_SMASK) &&
5538             is_ax(dd) && (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)) {
5539                 /* this error requires a manual drop into SPC freeze mode */
5540                 /* then a fix up */
5541                 start_freeze_handling(dd->pport, FREEZE_SELF);
5542         }
5543
5544         for (i = 0; i < NUM_CCE_ERR_STATUS_COUNTERS; i++) {
5545                 if (reg & (1ull << i)) {
5546                         incr_cntr64(&dd->cce_err_status_cnt[i]);
5547                         /* maintain a counter over all cce_err_status errors */
5548                         incr_cntr64(&dd->sw_cce_err_status_aggregate);
5549                 }
5550         }
5551 }
5552
5553 /*
5554  * Check counters for receive errors that do not have an interrupt
5555  * associated with them.
5556  */
5557 #define RCVERR_CHECK_TIME 10 /* seconds */
5558 static void update_rcverr_timer(struct timer_list *t)
5559 {
5560         struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
5561         struct hfi1_pportdata *ppd = dd->pport;
5562         u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
5563
5564         if (dd->rcv_ovfl_cnt < cur_ovfl_cnt &&
5565             ppd->port_error_action & OPA_PI_MASK_EX_BUFFER_OVERRUN) {
5566                 dd_dev_info(dd, "%s: PortErrorAction bounce\n", __func__);
5567                 set_link_down_reason(
5568                 ppd, OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN, 0,
5569                 OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN);
5570                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
5571         }
5572         dd->rcv_ovfl_cnt = (u32)cur_ovfl_cnt;
5573
5574         mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5575 }
5576
5577 static int init_rcverr(struct hfi1_devdata *dd)
5578 {
5579         timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
5580         /* Assume the hardware counter has been reset */
5581         dd->rcv_ovfl_cnt = 0;
5582         return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
5583 }
5584
5585 static void free_rcverr(struct hfi1_devdata *dd)
5586 {
5587         if (dd->rcverr_timer.function)
5588                 del_timer_sync(&dd->rcverr_timer);
5589 }
5590
5591 static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5592 {
5593         char buf[96];
5594         int i = 0;
5595
5596         dd_dev_info(dd, "Receive Error: %s\n",
5597                     rxe_err_status_string(buf, sizeof(buf), reg));
5598
5599         if (reg & ALL_RXE_FREEZE_ERR) {
5600                 int flags = 0;
5601
5602                 /*
5603                  * Freeze mode recovery is disabled for the errors
5604                  * in RXE_FREEZE_ABORT_MASK
5605                  */
5606                 if (is_ax(dd) && (reg & RXE_FREEZE_ABORT_MASK))
5607                         flags = FREEZE_ABORT;
5608
5609                 start_freeze_handling(dd->pport, flags);
5610         }
5611
5612         for (i = 0; i < NUM_RCV_ERR_STATUS_COUNTERS; i++) {
5613                 if (reg & (1ull << i))
5614                         incr_cntr64(&dd->rcv_err_status_cnt[i]);
5615         }
5616 }
5617
5618 static void handle_misc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5619 {
5620         char buf[96];
5621         int i = 0;
5622
5623         dd_dev_info(dd, "Misc Error: %s",
5624                     misc_err_status_string(buf, sizeof(buf), reg));
5625         for (i = 0; i < NUM_MISC_ERR_STATUS_COUNTERS; i++) {
5626                 if (reg & (1ull << i))
5627                         incr_cntr64(&dd->misc_err_status_cnt[i]);
5628         }
5629 }
5630
5631 static void handle_pio_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5632 {
5633         char buf[96];
5634         int i = 0;
5635
5636         dd_dev_info(dd, "PIO Error: %s\n",
5637                     pio_err_status_string(buf, sizeof(buf), reg));
5638
5639         if (reg & ALL_PIO_FREEZE_ERR)
5640                 start_freeze_handling(dd->pport, 0);
5641
5642         for (i = 0; i < NUM_SEND_PIO_ERR_STATUS_COUNTERS; i++) {
5643                 if (reg & (1ull << i))
5644                         incr_cntr64(&dd->send_pio_err_status_cnt[i]);
5645         }
5646 }
5647
5648 static void handle_sdma_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5649 {
5650         char buf[96];
5651         int i = 0;
5652
5653         dd_dev_info(dd, "SDMA Error: %s\n",
5654                     sdma_err_status_string(buf, sizeof(buf), reg));
5655
5656         if (reg & ALL_SDMA_FREEZE_ERR)
5657                 start_freeze_handling(dd->pport, 0);
5658
5659         for (i = 0; i < NUM_SEND_DMA_ERR_STATUS_COUNTERS; i++) {
5660                 if (reg & (1ull << i))
5661                         incr_cntr64(&dd->send_dma_err_status_cnt[i]);
5662         }
5663 }
5664
5665 static inline void __count_port_discards(struct hfi1_pportdata *ppd)
5666 {
5667         incr_cntr64(&ppd->port_xmit_discards);
5668 }
5669
5670 static void count_port_inactive(struct hfi1_devdata *dd)
5671 {
5672         __count_port_discards(dd->pport);
5673 }
5674
5675 /*
5676  * We have had a "disallowed packet" error during egress. Determine the
5677  * integrity check which failed, and update relevant error counter, etc.
5678  *
5679  * Note that the SEND_EGRESS_ERR_INFO register has only a single
5680  * bit of state per integrity check, and so we can miss the reason for an
5681  * egress error if more than one packet fails the same integrity check
5682  * since we cleared the corresponding bit in SEND_EGRESS_ERR_INFO.
5683  */
5684 static void handle_send_egress_err_info(struct hfi1_devdata *dd,
5685                                         int vl)
5686 {
5687         struct hfi1_pportdata *ppd = dd->pport;
5688         u64 src = read_csr(dd, SEND_EGRESS_ERR_SOURCE); /* read first */
5689         u64 info = read_csr(dd, SEND_EGRESS_ERR_INFO);
5690         char buf[96];
5691
5692         /* clear down all observed info as quickly as possible after read */
5693         write_csr(dd, SEND_EGRESS_ERR_INFO, info);
5694
5695         dd_dev_info(dd,
5696                     "Egress Error Info: 0x%llx, %s Egress Error Src 0x%llx\n",
5697                     info, egress_err_info_string(buf, sizeof(buf), info), src);
5698
5699         /* Eventually add other counters for each bit */
5700         if (info & PORT_DISCARD_EGRESS_ERRS) {
5701                 int weight, i;
5702
5703                 /*
5704                  * Count all applicable bits as individual errors and
5705                  * attribute them to the packet that triggered this handler.
5706                  * This may not be completely accurate due to limitations
5707                  * on the available hardware error information.  There is
5708                  * a single information register and any number of error
5709                  * packets may have occurred and contributed to it before
5710                  * this routine is called.  This means that:
5711                  * a) If multiple packets with the same error occur before
5712                  *    this routine is called, earlier packets are missed.
5713                  *    There is only a single bit for each error type.
5714                  * b) Errors may not be attributed to the correct VL.
5715                  *    The driver is attributing all bits in the info register
5716                  *    to the packet that triggered this call, but bits
5717                  *    could be an accumulation of different packets with
5718                  *    different VLs.
5719                  * c) A single error packet may have multiple counts attached
5720                  *    to it.  There is no way for the driver to know if
5721                  *    multiple bits set in the info register are due to a
5722                  *    single packet or multiple packets.  The driver assumes
5723                  *    multiple packets.
5724                  */
5725                 weight = hweight64(info & PORT_DISCARD_EGRESS_ERRS);
5726                 for (i = 0; i < weight; i++) {
5727                         __count_port_discards(ppd);
5728                         if (vl >= 0 && vl < TXE_NUM_DATA_VL)
5729                                 incr_cntr64(&ppd->port_xmit_discards_vl[vl]);
5730                         else if (vl == 15)
5731                                 incr_cntr64(&ppd->port_xmit_discards_vl
5732                                             [C_VL_15]);
5733                 }
5734         }
5735 }
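/*
 * Concrete illustration of the accounting above (counts are hypothetical):
 * if three of the PORT_DISCARD_EGRESS_ERRS bits are set when this runs for
 * a packet on VL 2, hweight64() yields 3 and both port_xmit_discards and
 * port_xmit_discards_vl[2] are incremented three times, even though some
 * of those bits may belong to earlier packets on other VLs.
 */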
5736
5737 /*
5738  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5739  * register. Does it represent a 'port inactive' error?
5740  */
5741 static inline int port_inactive_err(u64 posn)
5742 {
5743         return (posn >= SEES(TX_LINKDOWN) &&
5744                 posn <= SEES(TX_INCORRECT_LINK_STATE));
5745 }
5746
5747 /*
5748  * Input value is a bit position within the SEND_EGRESS_ERR_STATUS
5749  * register. Does it represent a 'disallowed packet' error?
5750  */
5751 static inline int disallowed_pkt_err(int posn)
5752 {
5753         return (posn >= SEES(TX_SDMA0_DISALLOWED_PACKET) &&
5754                 posn <= SEES(TX_SDMA15_DISALLOWED_PACKET));
5755 }
5756
5757 /*
5758  * Input value is a bit position of one of the SDMA engine disallowed
5759  * packet errors.  Return which engine.  Use of this must be guarded by
5760  * disallowed_pkt_err().
5761  */
5762 static inline int disallowed_pkt_engine(int posn)
5763 {
5764         return posn - SEES(TX_SDMA0_DISALLOWED_PACKET);
5765 }
5766
5767 /*
5768  * Translate an SDMA engine to a VL.  Return -1 if the translation cannot
5769  * be done.
5770  */
5771 static int engine_to_vl(struct hfi1_devdata *dd, int engine)
5772 {
5773         struct sdma_vl_map *m;
5774         int vl;
5775
5776         /* range check */
5777         if (engine < 0 || engine >= TXE_NUM_SDMA_ENGINES)
5778                 return -1;
5779
5780         rcu_read_lock();
5781         m = rcu_dereference(dd->sdma_map);
5782         vl = m->engine_to_vl[engine];
5783         rcu_read_unlock();
5784
5785         return vl;
5786 }
5787
5788 /*
5789  * Translate the send context (software index) into a VL.  Return -1 if the
5790  * translation cannot be done.
5791  */
5792 static int sc_to_vl(struct hfi1_devdata *dd, int sw_index)
5793 {
5794         struct send_context_info *sci;
5795         struct send_context *sc;
5796         int i;
5797
5798         sci = &dd->send_contexts[sw_index];
5799
5800         /* there is no information for user (PSM) and ack contexts */
5801         if ((sci->type != SC_KERNEL) && (sci->type != SC_VL15))
5802                 return -1;
5803
5804         sc = sci->sc;
5805         if (!sc)
5806                 return -1;
5807         if (dd->vld[15].sc == sc)
5808                 return 15;
5809         for (i = 0; i < num_vls; i++)
5810                 if (dd->vld[i].sc == sc)
5811                         return i;
5812
5813         return -1;
5814 }
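/*
 * Note on the lookup order above: the VL15 (management) send context is
 * checked before the per-VL kernel contexts, and user (PSM) and ack
 * contexts are rejected outright since they carry no VL mapping.
 */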
5815
5816 static void handle_egress_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5817 {
5818         u64 reg_copy = reg, handled = 0;
5819         char buf[96];
5820         int i = 0;
5821
5822         if (reg & ALL_TXE_EGRESS_FREEZE_ERR)
5823                 start_freeze_handling(dd->pport, 0);
5824         else if (is_ax(dd) &&
5825                  (reg & SEND_EGRESS_ERR_STATUS_TX_CREDIT_RETURN_VL_ERR_SMASK) &&
5826                  (dd->icode != ICODE_FUNCTIONAL_SIMULATOR))
5827                 start_freeze_handling(dd->pport, 0);
5828
5829         while (reg_copy) {
5830                 int posn = fls64(reg_copy);
5831                 /* fls64() returns a 1-based offset, we want it zero based */
5832                 int shift = posn - 1;
5833                 u64 mask = 1ULL << shift;
5834
5835                 if (port_inactive_err(shift)) {
5836                         count_port_inactive(dd);
5837                         handled |= mask;
5838                 } else if (disallowed_pkt_err(shift)) {
5839                         int vl = engine_to_vl(dd, disallowed_pkt_engine(shift));
5840
5841                         handle_send_egress_err_info(dd, vl);
5842                         handled |= mask;
5843                 }
5844                 reg_copy &= ~mask;
5845         }
5846
5847         reg &= ~handled;
5848
5849         if (reg)
5850                 dd_dev_info(dd, "Egress Error: %s\n",
5851                             egress_err_status_string(buf, sizeof(buf), reg));
5852
5853         for (i = 0; i < NUM_SEND_EGRESS_ERR_STATUS_COUNTERS; i++) {
5854                 if (reg & (1ull << i))
5855                         incr_cntr64(&dd->send_egress_err_status_cnt[i]);
5856         }
5857 }
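/*
 * Decode example for the loop above (bit choice is illustrative): if only
 * SEES(TX_SDMA3_DISALLOWED_PACKET) is set, fls64() returns its 1-based
 * position, shift becomes the 0-based bit number,
 * disallowed_pkt_engine(shift) maps it to engine 3, engine_to_vl() turns
 * that into a VL for handle_send_egress_err_info(), and the bit is added
 * to 'handled' so it is not reported again as an unexpected egress error.
 */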
5858
5859 static void handle_txe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
5860 {
5861         char buf[96];
5862         int i = 0;
5863
5864         dd_dev_info(dd, "Send Error: %s\n",
5865                     send_err_status_string(buf, sizeof(buf), reg));
5866
5867         for (i = 0; i < NUM_SEND_ERR_STATUS_COUNTERS; i++) {
5868                 if (reg & (1ull << i))
5869                         incr_cntr64(&dd->send_err_status_cnt[i]);
5870         }
5871 }
5872
5873 /*
5874  * The maximum number of times the error clear down will loop before
5875  * blocking a repeating error.  This value is arbitrary.
5876  */
5877 #define MAX_CLEAR_COUNT 20
5878
5879 /*
5880  * Clear and handle an error register.  All error interrupts are funneled
5881  * through here to have a central location to correctly handle single-
5882  * or multi-shot errors.
5883  *
5884  * For non per-context registers, call this routine with a context value
5885  * of 0 so the per-context offset is zero.
5886  *
5887  * If the handler loops too many times, assume that something is wrong
5888  * and can't be fixed, so mask the error bits.
5889  */
5890 static void interrupt_clear_down(struct hfi1_devdata *dd,
5891                                  u32 context,
5892                                  const struct err_reg_info *eri)
5893 {
5894         u64 reg;
5895         u32 count;
5896
5897         /* read in a loop until no more errors are seen */
5898         count = 0;
5899         while (1) {
5900                 reg = read_kctxt_csr(dd, context, eri->status);
5901                 if (reg == 0)
5902                         break;
5903                 write_kctxt_csr(dd, context, eri->clear, reg);
5904                 if (likely(eri->handler))
5905                         eri->handler(dd, context, reg);
5906                 count++;
5907                 if (count > MAX_CLEAR_COUNT) {
5908                         u64 mask;
5909
5910                         dd_dev_err(dd, "Repeating %s bits 0x%llx - masking\n",
5911                                    eri->desc, reg);
5912                         /*
5913                          * Read-modify-write so any other masked bits
5914                          * remain masked.
5915                          */
5916                         mask = read_kctxt_csr(dd, context, eri->mask);
5917                         mask &= ~reg;
5918                         write_kctxt_csr(dd, context, eri->mask, mask);
5919                         break;
5920                 }
5921         }
5922 }
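/*
 * Example of the masking behaviour (hypothetical stuck bit): if the same
 * status bit re-asserts on every pass, after MAX_CLEAR_COUNT (20) passes
 * the read-modify-write above clears that bit in the mask CSR, so the
 * offending source stops interrupting while all other mask bits are
 * preserved.
 */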
5923
5924 /*
5925  * CCE block "misc" interrupt.  Source is < 16.
5926  */
5927 static void is_misc_err_int(struct hfi1_devdata *dd, unsigned int source)
5928 {
5929         const struct err_reg_info *eri = &misc_errs[source];
5930
5931         if (eri->handler) {
5932                 interrupt_clear_down(dd, 0, eri);
5933         } else {
5934                 dd_dev_err(dd, "Unexpected misc interrupt (%u) - reserved\n",
5935                            source);
5936         }
5937 }
5938
5939 static char *send_context_err_status_string(char *buf, int buf_len, u64 flags)
5940 {
5941         return flag_string(buf, buf_len, flags,
5942                            sc_err_status_flags,
5943                            ARRAY_SIZE(sc_err_status_flags));
5944 }
5945
5946 /*
5947  * Send context error interrupt.  Source (hw_context) is < 160.
5948  *
5949  * All send context errors cause the send context to halt.  The normal
5950  * clear-down mechanism cannot be used because we cannot clear the
5951  * error bits until several other long-running items are done first.
5952  * This is OK because with the context halted, nothing else is going
5953  * to happen on it anyway.
5954  */
5955 static void is_sendctxt_err_int(struct hfi1_devdata *dd,
5956                                 unsigned int hw_context)
5957 {
5958         struct send_context_info *sci;
5959         struct send_context *sc;
5960         char flags[96];
5961         u64 status;
5962         u32 sw_index;
5963         int i = 0;
5964         unsigned long irq_flags;
5965
5966         sw_index = dd->hw_to_sw[hw_context];
5967         if (sw_index >= dd->num_send_contexts) {
5968                 dd_dev_err(dd,
5969                            "out of range sw index %u for send context %u\n",
5970                            sw_index, hw_context);
5971                 return;
5972         }
5973         sci = &dd->send_contexts[sw_index];
5974         spin_lock_irqsave(&dd->sc_lock, irq_flags);
5975         sc = sci->sc;
5976         if (!sc) {
5977                 dd_dev_err(dd, "%s: context %u(%u): no sc?\n", __func__,
5978                            sw_index, hw_context);
5979                 spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
5980                 return;
5981         }
5982
5983         /* tell the software that a halt has begun */
5984         sc_stop(sc, SCF_HALTED);
5985
5986         status = read_kctxt_csr(dd, hw_context, SEND_CTXT_ERR_STATUS);
5987
5988         dd_dev_info(dd, "Send Context %u(%u) Error: %s\n", sw_index, hw_context,
5989                     send_context_err_status_string(flags, sizeof(flags),
5990                                                    status));
5991
5992         if (status & SEND_CTXT_ERR_STATUS_PIO_DISALLOWED_PACKET_ERR_SMASK)
5993                 handle_send_egress_err_info(dd, sc_to_vl(dd, sw_index));
5994
5995         /*
5996          * Automatically restart halted kernel contexts out of interrupt
5997          * context.  User contexts must ask the driver to restart the context.
5998          */
5999         if (sc->type != SC_USER)
6000                 queue_work(dd->pport->hfi1_wq, &sc->halt_work);
6001         spin_unlock_irqrestore(&dd->sc_lock, irq_flags);
6002
6003         /*
6004          * Update the counters for the corresponding status bits.
6005          * Note that these particular counters are aggregated over all
6006          * 160 contexts.
6007          */
6008         for (i = 0; i < NUM_SEND_CTXT_ERR_STATUS_COUNTERS; i++) {
6009                 if (status & (1ull << i))
6010                         incr_cntr64(&dd->sw_ctxt_err_status_cnt[i]);
6011         }
6012 }
6013
6014 static void handle_sdma_eng_err(struct hfi1_devdata *dd,
6015                                 unsigned int source, u64 status)
6016 {
6017         struct sdma_engine *sde;
6018         int i = 0;
6019
6020         sde = &dd->per_sdma[source];
6021 #ifdef CONFIG_SDMA_VERBOSITY
6022         dd_dev_err(sde->dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6023                    slashstrip(__FILE__), __LINE__, __func__);
6024         dd_dev_err(sde->dd, "CONFIG SDMA(%u) source: %u status 0x%llx\n",
6025                    sde->this_idx, source, (unsigned long long)status);
6026 #endif
6027         sde->err_cnt++;
6028         sdma_engine_error(sde, status);
6029
6030         /*
6031          * Update the counters for the corresponding status bits.
6032          * Note that these particular counters are aggregated over
6033          * all 16 DMA engines.
6034          */
6035         for (i = 0; i < NUM_SEND_DMA_ENG_ERR_STATUS_COUNTERS; i++) {
6036                 if (status & (1ull << i))
6037                         incr_cntr64(&dd->sw_send_dma_eng_err_status_cnt[i]);
6038         }
6039 }
6040
6041 /*
6042  * CCE block SDMA error interrupt.  Source is < 16.
6043  */
6044 static void is_sdma_eng_err_int(struct hfi1_devdata *dd, unsigned int source)
6045 {
6046 #ifdef CONFIG_SDMA_VERBOSITY
6047         struct sdma_engine *sde = &dd->per_sdma[source];
6048
6049         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
6050                    slashstrip(__FILE__), __LINE__, __func__);
6051         dd_dev_err(dd, "CONFIG SDMA(%u) source: %u\n", sde->this_idx,
6052                    source);
6053         sdma_dumpstate(sde);
6054 #endif
6055         interrupt_clear_down(dd, source, &sdma_eng_err);
6056 }
6057
6058 /*
6059  * CCE block "various" interrupt.  Source is < 8.
6060  */
6061 static void is_various_int(struct hfi1_devdata *dd, unsigned int source)
6062 {
6063         const struct err_reg_info *eri = &various_err[source];
6064
6065         /*
6066          * TCritInt cannot go through interrupt_clear_down()
6067          * because it is not a second tier interrupt. The handler
6068          * should be called directly.
6069          */
6070         if (source == TCRIT_INT_SOURCE)
6071                 handle_temp_err(dd);
6072         else if (eri->handler)
6073                 interrupt_clear_down(dd, 0, eri);
6074         else
6075                 dd_dev_info(dd,
6076                             "%s: Unimplemented/reserved interrupt %d\n",
6077                             __func__, source);
6078 }
6079
6080 static void handle_qsfp_int(struct hfi1_devdata *dd, u32 src_ctx, u64 reg)
6081 {
6082         /* src_ctx is always zero */
6083         struct hfi1_pportdata *ppd = dd->pport;
6084         unsigned long flags;
6085         u64 qsfp_int_mgmt = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
6086
6087         if (reg & QSFP_HFI0_MODPRST_N) {
6088                 if (!qsfp_mod_present(ppd)) {
6089                         dd_dev_info(dd, "%s: QSFP module removed\n",
6090                                     __func__);
6091
6092                         ppd->driver_link_ready = 0;
6093                         /*
6094                          * Cable removed, reset all our information about the
6095                          * cache and cable capabilities
6096                          */
6097
6098                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6099                         /*
6100                          * We don't set cache_refresh_required here as we expect
6101                          * an interrupt when a cable is inserted
6102                          */
6103                         ppd->qsfp_info.cache_valid = 0;
6104                         ppd->qsfp_info.reset_needed = 0;
6105                         ppd->qsfp_info.limiting_active = 0;
6106                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6107                                                flags);
6108                         /* Invert the ModPresent pin now to detect plug-in */
6109                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6110                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6111
6112                         if ((ppd->offline_disabled_reason >
6113                           HFI1_ODR_MASK(
6114                           OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED)) ||
6115                           (ppd->offline_disabled_reason ==
6116                           HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE)))
6117                                 ppd->offline_disabled_reason =
6118                                 HFI1_ODR_MASK(
6119                                 OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED);
6120
6121                         if (ppd->host_link_state == HLS_DN_POLL) {
6122                                 /*
6123                                  * The link is still in POLL. This means
6124                                  * that the normal link down processing
6125                                  * will not happen. We have to do it here
6126                                  * before turning the DC off.
6127                                  */
6128                                 queue_work(ppd->link_wq, &ppd->link_down_work);
6129                         }
6130                 } else {
6131                         dd_dev_info(dd, "%s: QSFP module inserted\n",
6132                                     __func__);
6133
6134                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6135                         ppd->qsfp_info.cache_valid = 0;
6136                         ppd->qsfp_info.cache_refresh_required = 1;
6137                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
6138                                                flags);
6139
6140                         /*
6141                          * Stop inversion of ModPresent pin to detect
6142                          * removal of the cable
6143                          */
6144                         qsfp_int_mgmt &= ~(u64)QSFP_HFI0_MODPRST_N;
6145                         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_INVERT :
6146                                   ASIC_QSFP1_INVERT, qsfp_int_mgmt);
6147
6148                         ppd->offline_disabled_reason =
6149                                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
6150                 }
6151         }
6152
6153         if (reg & QSFP_HFI0_INT_N) {
6154                 dd_dev_info(dd, "%s: Interrupt received from QSFP module\n",
6155                             __func__);
6156                 spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
6157                 ppd->qsfp_info.check_interrupt_flags = 1;
6158                 spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock, flags);
6159         }
6160
6161         /* Schedule the QSFP work only if there is a cable attached. */
6162         if (qsfp_mod_present(ppd))
6163                 queue_work(ppd->link_wq, &ppd->qsfp_info.qsfp_work);
6164 }
6165
6166 static int request_host_lcb_access(struct hfi1_devdata *dd)
6167 {
6168         int ret;
6169
6170         ret = do_8051_command(dd, HCMD_MISC,
6171                               (u64)HCMD_MISC_REQUEST_LCB_ACCESS <<
6172                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6173         if (ret != HCMD_SUCCESS) {
6174                 dd_dev_err(dd, "%s: command failed with error %d\n",
6175                            __func__, ret);
6176         }
6177         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6178 }
6179
6180 static int request_8051_lcb_access(struct hfi1_devdata *dd)
6181 {
6182         int ret;
6183
6184         ret = do_8051_command(dd, HCMD_MISC,
6185                               (u64)HCMD_MISC_GRANT_LCB_ACCESS <<
6186                               LOAD_DATA_FIELD_ID_SHIFT, NULL);
6187         if (ret != HCMD_SUCCESS) {
6188                 dd_dev_err(dd, "%s: command failed with error %d\n",
6189                            __func__, ret);
6190         }
6191         return ret == HCMD_SUCCESS ? 0 : -EBUSY;
6192 }
6193
6194 /*
6195  * Set the LCB selector - allow host access.  The DCC selector always
6196  * points to the host.
6197  */
6198 static inline void set_host_lcb_access(struct hfi1_devdata *dd)
6199 {
6200         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6201                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK |
6202                   DC_DC8051_CFG_CSR_ACCESS_SEL_LCB_SMASK);
6203 }
6204
6205 /*
6206  * Clear the LCB selector - allow 8051 access.  The DCC selector always
6207  * points to the host.
6208  */
6209 static inline void set_8051_lcb_access(struct hfi1_devdata *dd)
6210 {
6211         write_csr(dd, DC_DC8051_CFG_CSR_ACCESS_SEL,
6212                   DC_DC8051_CFG_CSR_ACCESS_SEL_DCC_SMASK);
6213 }
6214
6215 /*
6216  * Acquire LCB access from the 8051.  If the host already has access,
6217  * just increment a counter.  Otherwise, inform the 8051 that the
6218  * host is taking access.
6219  *
6220  * Returns:
6221  *      0 on success
6222  *      -EBUSY if the 8051 has control and cannot be disturbed
6223  *      -errno if unable to acquire access from the 8051
6224  */
6225 int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6226 {
6227         struct hfi1_pportdata *ppd = dd->pport;
6228         int ret = 0;
6229
6230         /*
6231          * Use the host link state lock so the operation of this routine
6232          * { link state check, selector change, count increment } can occur
6233          * as a unit against a link state change.  Otherwise there is a
6234          * race between the state change and the count increment.
6235          */
6236         if (sleep_ok) {
6237                 mutex_lock(&ppd->hls_lock);
6238         } else {
6239                 while (!mutex_trylock(&ppd->hls_lock))
6240                         udelay(1);
6241         }
6242
6243         /* this access is valid only when the link is up */
6244         if (ppd->host_link_state & HLS_DOWN) {
6245                 dd_dev_info(dd, "%s: link state %s not up\n",
6246                             __func__, link_state_name(ppd->host_link_state));
6247                 ret = -EBUSY;
6248                 goto done;
6249         }
6250
6251         if (dd->lcb_access_count == 0) {
6252                 ret = request_host_lcb_access(dd);
6253                 if (ret) {
6254                         dd_dev_err(dd,
6255                                    "%s: unable to acquire LCB access, err %d\n",
6256                                    __func__, ret);
6257                         goto done;
6258                 }
6259                 set_host_lcb_access(dd);
6260         }
6261         dd->lcb_access_count++;
6262 done:
6263         mutex_unlock(&ppd->hls_lock);
6264         return ret;
6265 }
6266
6267 /*
6268  * Release LCB access by decrementing the use count.  If the count is moving
6269  * from 1 to 0, inform the 8051 that it has control back.
6270  *
6271  * Returns:
6272  *      0 on success
6273  *      -errno if unable to release access to the 8051
6274  */
6275 int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
6276 {
6277         int ret = 0;
6278
6279         /*
6280          * Use the host link state lock because the acquire needed it.
6281          * Here, we only need to keep { selector change, count decrement }
6282          * as a unit.
6283          */
6284         if (sleep_ok) {
6285                 mutex_lock(&dd->pport->hls_lock);
6286         } else {
6287                 while (!mutex_trylock(&dd->pport->hls_lock))
6288                         udelay(1);
6289         }
6290
6291         if (dd->lcb_access_count == 0) {
6292                 dd_dev_err(dd, "%s: LCB access count is zero.  Skipping.\n",
6293                            __func__);
6294                 goto done;
6295         }
6296
6297         if (dd->lcb_access_count == 1) {
6298                 set_8051_lcb_access(dd);
6299                 ret = request_8051_lcb_access(dd);
6300                 if (ret) {
6301                         dd_dev_err(dd,
6302                                    "%s: unable to release LCB access, err %d\n",
6303                                    __func__, ret);
6304                         /* restore host access if the grant didn't work */
6305                         set_host_lcb_access(dd);
6306                         goto done;
6307                 }
6308         }
6309         dd->lcb_access_count--;
6310 done:
6311         mutex_unlock(&dd->pport->hls_lock);
6312         return ret;
6313 }
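
/*
 * Illustrative usage sketch (not part of the driver): direct LCB CSR reads
 * are expected to be bracketed by the acquire/release pair above so the
 * 8051 firmware is not disturbed mid-access.  The helper name and the idea
 * of passing a raw CSR offset are assumptions for illustration only.
 */
static int __maybe_unused example_read_lcb_csr(struct hfi1_devdata *dd,
					       u32 offset, u64 *val)
{
	int ret;

	/* may sleep on the host link state mutex */
	ret = acquire_lcb_access(dd, 1);
	if (ret)
		return ret;	/* link down, or the 8051 kept control */
	*val = read_csr(dd, offset);
	release_lcb_access(dd, 1);
	return 0;
}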
6314
6315 /*
6316  * Initialize LCB access variables and state.  Called during driver load,
6317  * after most of the initialization is finished.
6318  *
6319  * The DC default is LCB access on for the host.  The driver defaults to
6320  * leaving access to the 8051.  Assign access now - this constrains the call
6321  * to this routine to be after all LCB set-up is done.  In particular, after
6322  * hfi1_init_dd() -> set_up_interrupts() -> clear_all_interrupts()
6323  */
6324 static void init_lcb_access(struct hfi1_devdata *dd)
6325 {
6326         dd->lcb_access_count = 0;
6327 }
6328
6329 /*
6330  * Write a response back to an 8051 request.
6331  */
6332 static void hreq_response(struct hfi1_devdata *dd, u8 return_code, u16 rsp_data)
6333 {
6334         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0,
6335                   DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK |
6336                   (u64)return_code <<
6337                   DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT |
6338                   (u64)rsp_data << DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
6339 }
6340
6341 /*
6342  * Handle host requests from the 8051.
6343  */
6344 static void handle_8051_request(struct hfi1_pportdata *ppd)
6345 {
6346         struct hfi1_devdata *dd = ppd->dd;
6347         u64 reg;
6348         u16 data = 0;
6349         u8 type;
6350
6351         reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_1);
6352         if ((reg & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK) == 0)
6353                 return; /* no request */
6354
6355         /* zero out COMPLETED so the response is seen */
6356         write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, 0);
6357
6358         /* extract request details */
6359         type = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT)
6360                         & DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
6361         data = (reg >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT)
6362                         & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
6363
6364         switch (type) {
6365         case HREQ_LOAD_CONFIG:
6366         case HREQ_SAVE_CONFIG:
6367         case HREQ_READ_CONFIG:
6368         case HREQ_SET_TX_EQ_ABS:
6369         case HREQ_SET_TX_EQ_REL:
6370         case HREQ_ENABLE:
6371                 dd_dev_info(dd, "8051 request: request 0x%x not supported\n",
6372                             type);
6373                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6374                 break;
6375         case HREQ_LCB_RESET:
6376                 /* Put the LCB, RX FPE and TX FPE into reset */
6377                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_INTO_RESET);
6378                 /* Make sure the write completed */
6379                 (void)read_csr(dd, DCC_CFG_RESET);
6380                 /* Hold the reset long enough to take effect */
6381                 udelay(1);
6382                 /* Take the LCB, RX FPE and TX FPE out of reset */
6383                 write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6384                 hreq_response(dd, HREQ_SUCCESS, 0);
6385
6386                 break;
6387         case HREQ_CONFIG_DONE:
6388                 hreq_response(dd, HREQ_SUCCESS, 0);
6389                 break;
6390
6391         case HREQ_INTERFACE_TEST:
6392                 hreq_response(dd, HREQ_SUCCESS, data);
6393                 break;
6394         default:
6395                 dd_dev_err(dd, "8051 request: unknown request 0x%x\n", type);
6396                 hreq_response(dd, HREQ_NOT_SUPPORTED, 0);
6397                 break;
6398         }
6399 }
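
/*
 * Illustrative sketch only, not part of the driver: the host-request
 * mailbox above is one CSR in each direction.  The 8051 raises REQ_NEW in
 * DC_DC8051_CFG_EXT_DEV_1 along with a type/data pair; the host answers by
 * writing COMPLETED plus a return code and response data to
 * DC_DC8051_CFG_EXT_DEV_0 via hreq_response().  The struct and helper names
 * below are hypothetical and exist only to show the field decode.
 */
struct hreq_example {
	bool new_request;
	u8 type;
	u16 data;
};

static void __maybe_unused decode_hreq_example(u64 ext_dev_1,
					       struct hreq_example *req)
{
	req->new_request = !!(ext_dev_1 & DC_DC8051_CFG_EXT_DEV_1_REQ_NEW_SMASK);
	req->type = (ext_dev_1 >> DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_SHIFT) &
		    DC_DC8051_CFG_EXT_DEV_1_REQ_TYPE_MASK;
	req->data = (ext_dev_1 >> DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT) &
		    DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_MASK;
}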
6400
6401 /*
6402  * Set up the allocation unit value.
6403  */
6404 void set_up_vau(struct hfi1_devdata *dd, u8 vau)
6405 {
6406         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6407
6408         /* do not modify other values in the register */
6409         reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
6410         reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
6411         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6412 }
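
/*
 * Example (illustrative value): set_up_vau(dd, 1) rewrites only the AU
 * field of SEND_CM_GLOBAL_CREDIT to 1, i.e. 16-byte allocation units per
 * vau_to_au() below; every other field survives the read-modify-write.
 */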
6413
6414 /*
6415  * Set up initial VL15 credits of the remote.  Assumes the rest of
6416  * the CM credit registers are zero from a previous global or credit reset.
6417  * Shared limit for VL15 will always be 0.
6418  */
6419 void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
6420 {
6421         u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
6422
6423         /* set initial values for total and shared credit limit */
6424         reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
6425                  SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
6426
6427         /*
6428          * Set total limit to be equal to VL15 credits.
6429          * Leave shared limit at 0.
6430          */
6431         reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
6432         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
6433
6434         write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
6435                   << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
6436 }
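
/*
 * Example (illustrative value): set_up_vl15(dd, 0x80) programs a total
 * credit limit of 0x80 and a VL15 dedicated limit of 0x80, and leaves the
 * shared limit at 0 as described above.
 */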
6437
6438 /*
6439  * Zero all credit details from the previous connection and
6440  * reset the CM manager's internal counters.
6441  */
6442 void reset_link_credits(struct hfi1_devdata *dd)
6443 {
6444         int i;
6445
6446         /* remove all previous VL credit limits */
6447         for (i = 0; i < TXE_NUM_DATA_VL; i++)
6448                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
6449         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
6450         write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
6451         /* reset the CM block */
6452         pio_send_control(dd, PSC_CM_RESET);
6453         /* reset cached value */
6454         dd->vl15buf_cached = 0;
6455 }
6456
6457 /* convert a vCU to a CU */
6458 static u32 vcu_to_cu(u8 vcu)
6459 {
6460         return 1 << vcu;
6461 }
6462
6463 /* convert a CU to a vCU */
6464 static u8 cu_to_vcu(u32 cu)
6465 {
6466         return ilog2(cu);
6467 }
6468
6469 /* convert a vAU to an AU */
6470 static u32 vau_to_au(u8 vau)
6471 {
6472         return 8 * (1 << vau);
6473 }
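
/*
 * Worked examples for the conversions above (values illustrative):
 *	vcu_to_cu(2)  = 1 << 2       = 4 CUs
 *	cu_to_vcu(4)  = ilog2(4)     = 2
 *	vau_to_au(1)  = 8 * (1 << 1) = 16 bytes
 * A vAU of 0 would mean an 8-byte AU, which is why handle_verify_cap()
 * below bumps a peer vAU of 0 up to 1.
 */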
6474
6475 static void set_linkup_defaults(struct hfi1_pportdata *ppd)
6476 {
6477         ppd->sm_trap_qp = 0x0;
6478         ppd->sa_qp = 0x1;
6479 }
6480
6481 /*
6482  * Graceful LCB shutdown.  This leaves the LCB FIFOs in reset.
6483  */
6484 static void lcb_shutdown(struct hfi1_devdata *dd, int abort)
6485 {
6486         u64 reg;
6487
6488         /* clear lcb run: LCB_CFG_RUN.EN = 0 */
6489         write_csr(dd, DC_LCB_CFG_RUN, 0);
6490         /* set tx fifo reset: LCB_CFG_TX_FIFOS_RESET.VAL = 1 */
6491         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET,
6492                   1ull << DC_LCB_CFG_TX_FIFOS_RESET_VAL_SHIFT);
6493         /* set dcc reset csr: DCC_CFG_RESET.{reset_lcb,reset_rx_fpe} = 1 */
6494         dd->lcb_err_en = read_csr(dd, DC_LCB_ERR_EN);
6495         reg = read_csr(dd, DCC_CFG_RESET);
6496         write_csr(dd, DCC_CFG_RESET, reg |
6497                   DCC_CFG_RESET_RESET_LCB | DCC_CFG_RESET_RESET_RX_FPE);
6498         (void)read_csr(dd, DCC_CFG_RESET); /* make sure the write completed */
6499         if (!abort) {
6500                 udelay(1);    /* must hold for the longer of 16cclks or 20ns */
6501                 write_csr(dd, DCC_CFG_RESET, reg);
6502                 write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6503         }
6504 }
6505
6506 /*
6507  * This routine should be called after the link has been transitioned to
6508  * OFFLINE (OFFLINE state has the side effect of putting the SerDes into
6509  * reset).
6510  *
6511  * The expectation is that the caller of this routine would have taken
6512  * care of properly transitioning the link into the correct state.
6513  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6514  *       before calling this function.
6515  */
6516 static void _dc_shutdown(struct hfi1_devdata *dd)
6517 {
6518         lockdep_assert_held(&dd->dc8051_lock);
6519
6520         if (dd->dc_shutdown)
6521                 return;
6522
6523         dd->dc_shutdown = 1;
6524         /* Shutdown the LCB */
6525         lcb_shutdown(dd, 1);
6526         /*
6527          * Going to OFFLINE would have caused the 8051 to put the
6528          * SerDes into reset already. Just need to shut down the
6529          * 8051 itself.
6530          */
6531         write_csr(dd, DC_DC8051_CFG_RST, 0x1);
6532 }
6533
6534 static void dc_shutdown(struct hfi1_devdata *dd)
6535 {
6536         mutex_lock(&dd->dc8051_lock);
6537         _dc_shutdown(dd);
6538         mutex_unlock(&dd->dc8051_lock);
6539 }
6540
6541 /*
6542  * Calling this after the DC has been brought out of reset should not
6543  * do any damage.
6544  * NOTE: the caller needs to acquire the dd->dc8051_lock lock
6545  *       before calling this function.
6546  */
6547 static void _dc_start(struct hfi1_devdata *dd)
6548 {
6549         lockdep_assert_held(&dd->dc8051_lock);
6550
6551         if (!dd->dc_shutdown)
6552                 return;
6553
6554         /* Take the 8051 out of reset */
6555         write_csr(dd, DC_DC8051_CFG_RST, 0ull);
6556         /* Wait until 8051 is ready */
6557         if (wait_fm_ready(dd, TIMEOUT_8051_START))
6558                 dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
6559                            __func__);
6560
6561         /* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
6562         write_csr(dd, DCC_CFG_RESET, LCB_RX_FPE_TX_FPE_OUT_OF_RESET);
6563         /* lcb_shutdown() with abort=1 does not restore these */
6564         write_csr(dd, DC_LCB_ERR_EN, dd->lcb_err_en);
6565         dd->dc_shutdown = 0;
6566 }
6567
6568 static void dc_start(struct hfi1_devdata *dd)
6569 {
6570         mutex_lock(&dd->dc8051_lock);
6571         _dc_start(dd);
6572         mutex_unlock(&dd->dc8051_lock);
6573 }
6574
6575 /*
6576  * These LCB adjustments are for the Aurora SerDes core in the FPGA.
6577  */
6578 static void adjust_lcb_for_fpga_serdes(struct hfi1_devdata *dd)
6579 {
6580         u64 rx_radr, tx_radr;
6581         u32 version;
6582
6583         if (dd->icode != ICODE_FPGA_EMULATION)
6584                 return;
6585
6586         /*
6587          * These LCB defaults on emulator _s are good, nothing to do here:
6588          *      LCB_CFG_TX_FIFOS_RADR
6589          *      LCB_CFG_RX_FIFOS_RADR
6590          *      LCB_CFG_LN_DCLK
6591          *      LCB_CFG_IGNORE_LOST_RCLK
6592          */
6593         if (is_emulator_s(dd))
6594                 return;
6595         /* else this is _p */
6596
6597         version = emulator_rev(dd);
6598         if (!is_ax(dd))
6599                 version = 0x2d; /* all B0 use 0x2d or higher settings */
6600
6601         if (version <= 0x12) {
6602                 /* release 0x12 and below */
6603
6604                 /*
6605                  * LCB_CFG_RX_FIFOS_RADR.RST_VAL = 0x9
6606                  * LCB_CFG_RX_FIFOS_RADR.OK_TO_JUMP_VAL = 0x9
6607                  * LCB_CFG_RX_FIFOS_RADR.DO_NOT_JUMP_VAL = 0xa
6608                  */
6609                 rx_radr =
6610                       0xaull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6611                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6612                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6613                 /*
6614                  * LCB_CFG_TX_FIFOS_RADR.ON_REINIT = 0 (default)
6615                  * LCB_CFG_TX_FIFOS_RADR.RST_VAL = 6
6616                  */
6617                 tx_radr = 6ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6618         } else if (version <= 0x18) {
6619                 /* release 0x13 up to 0x18 */
6620                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6621                 rx_radr =
6622                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6623                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6624                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6625                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6626         } else if (version == 0x19) {
6627                 /* release 0x19 */
6628                 /* LCB_CFG_RX_FIFOS_RADR = 0xa99 */
6629                 rx_radr =
6630                       0xAull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6631                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6632                     | 0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6633                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6634         } else if (version == 0x1a) {
6635                 /* release 0x1a */
6636                 /* LCB_CFG_RX_FIFOS_RADR = 0x988 */
6637                 rx_radr =
6638                       0x9ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6639                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6640                     | 0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6641                 tx_radr = 7ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6642                 write_csr(dd, DC_LCB_CFG_LN_DCLK, 1ull);
6643         } else {
6644                 /* release 0x1b and higher */
6645                 /* LCB_CFG_RX_FIFOS_RADR = 0x877 */
6646                 rx_radr =
6647                       0x8ull << DC_LCB_CFG_RX_FIFOS_RADR_DO_NOT_JUMP_VAL_SHIFT
6648                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_OK_TO_JUMP_VAL_SHIFT
6649                     | 0x7ull << DC_LCB_CFG_RX_FIFOS_RADR_RST_VAL_SHIFT;
6650                 tx_radr = 3ull << DC_LCB_CFG_TX_FIFOS_RADR_RST_VAL_SHIFT;
6651         }
6652
6653         write_csr(dd, DC_LCB_CFG_RX_FIFOS_RADR, rx_radr);
6654         /* LCB_CFG_IGNORE_LOST_RCLK.EN = 1 */
6655         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
6656                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
6657         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RADR, tx_radr);
6658 }
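
/*
 * Note on the values above, derived from the per-release comments: the
 * three RADR fields occupy adjacent nibbles as DO_NOT_JUMP:OK_TO_JUMP:RST,
 * so for example 0xa/0x9/0x9 is the 0xa99 cited for release 0x19 and
 * 0x9/0x8/0x8 is the 0x988 cited for releases 0x13-0x18 and 0x1a.
 */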
6659
6660 /*
6661  * Handle an SMA idle message
6662  *
6663  * This is a work-queue function outside of the interrupt.
6664  */
6665 void handle_sma_message(struct work_struct *work)
6666 {
6667         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6668                                                         sma_message_work);
6669         struct hfi1_devdata *dd = ppd->dd;
6670         u64 msg;
6671         int ret;
6672
6673         /*
6674          * msg is bytes 1-4 of the 40-bit idle message - the command code
6675          * is stripped off
6676          */
6677         ret = read_idle_sma(dd, &msg);
6678         if (ret)
6679                 return;
6680         dd_dev_info(dd, "%s: SMA message 0x%llx\n", __func__, msg);
6681         /*
6682          * React to the SMA message.  Byte[1] (0 for us) is the command.
6683          */
6684         switch (msg & 0xff) {
6685         case SMA_IDLE_ARM:
6686                 /*
6687                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6688                  * State Transitions
6689                  *
6690                  * Only expected in INIT or ARMED, discard otherwise.
6691                  */
6692                 if (ppd->host_link_state & (HLS_UP_INIT | HLS_UP_ARMED))
6693                         ppd->neighbor_normal = 1;
6694                 break;
6695         case SMA_IDLE_ACTIVE:
6696                 /*
6697                  * See OPAv1 table 9-14 - HFI and External Switch Ports Key
6698                  * State Transitions
6699                  *
6700                  * Can activate the node.  Discard otherwise.
6701                  */
6702                 if (ppd->host_link_state == HLS_UP_ARMED &&
6703                     ppd->is_active_optimize_enabled) {
6704                         ppd->neighbor_normal = 1;
6705                         ret = set_link_state(ppd, HLS_UP_ACTIVE);
6706                         if (ret)
6707                                 dd_dev_err(
6708                                         dd,
6709                                         "%s: received Active SMA idle message, couldn't set link to Active\n",
6710                                         __func__);
6711                 }
6712                 break;
6713         default:
6714                 dd_dev_err(dd,
6715                            "%s: received unexpected SMA idle message 0x%llx\n",
6716                            __func__, msg);
6717                 break;
6718         }
6719 }
6720
6721 static void adjust_rcvctrl(struct hfi1_devdata *dd, u64 add, u64 clear)
6722 {
6723         u64 rcvctrl;
6724         unsigned long flags;
6725
6726         spin_lock_irqsave(&dd->rcvctrl_lock, flags);
6727         rcvctrl = read_csr(dd, RCV_CTRL);
6728         rcvctrl |= add;
6729         rcvctrl &= ~clear;
6730         write_csr(dd, RCV_CTRL, rcvctrl);
6731         spin_unlock_irqrestore(&dd->rcvctrl_lock, flags);
6732 }
6733
6734 static inline void add_rcvctrl(struct hfi1_devdata *dd, u64 add)
6735 {
6736         adjust_rcvctrl(dd, add, 0);
6737 }
6738
6739 static inline void clear_rcvctrl(struct hfi1_devdata *dd, u64 clear)
6740 {
6741         adjust_rcvctrl(dd, 0, clear);
6742 }
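
/*
 * Usage sketch, mirroring calls later in this file: the receive port
 * enable bit is toggled through these helpers so that every RCV_CTRL
 * update stays serialized under rcvctrl_lock, e.g.
 *	add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);    enable port
 *	clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);  disable port
 */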
6743
6744 /*
6745  * Called from all interrupt handlers to start handling an SPC freeze.
6746  */
6747 void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6748 {
6749         struct hfi1_devdata *dd = ppd->dd;
6750         struct send_context *sc;
6751         int i;
6752         int sc_flags;
6753
6754         if (flags & FREEZE_SELF)
6755                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6756
6757         /* enter frozen mode */
6758         dd->flags |= HFI1_FROZEN;
6759
6760         /* notify all SDMA engines that they are going into a freeze */
6761         sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6762
6763         sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6764                                               SCF_LINK_DOWN : 0);
6765         /* do halt pre-handling on all enabled send contexts */
6766         for (i = 0; i < dd->num_send_contexts; i++) {
6767                 sc = dd->send_contexts[i].sc;
6768                 if (sc && (sc->flags & SCF_ENABLED))
6769                         sc_stop(sc, sc_flags);
6770         }
6771
6772         /* Send contexts are frozen. Notify user space */
6773         hfi1_set_uevent_bits(ppd, _HFI1_EVENT_FROZEN_BIT);
6774
6775         if (flags & FREEZE_ABORT) {
6776                 dd_dev_err(dd,
6777                            "Aborted freeze recovery. Please REBOOT system\n");
6778                 return;
6779         }
6780         /* queue non-interrupt handler */
6781         queue_work(ppd->hfi1_wq, &ppd->freeze_work);
6782 }
6783
6784 /*
6785  * Wait until all 4 sub-blocks indicate that they have frozen or unfrozen,
6786  * depending on the "freeze" parameter.
6787  *
6788  * No need to return an error if it times out, our only option
6789  * is to proceed anyway.
6790  */
6791 static void wait_for_freeze_status(struct hfi1_devdata *dd, int freeze)
6792 {
6793         unsigned long timeout;
6794         u64 reg;
6795
6796         timeout = jiffies + msecs_to_jiffies(FREEZE_STATUS_TIMEOUT);
6797         while (1) {
6798                 reg = read_csr(dd, CCE_STATUS);
6799                 if (freeze) {
6800                         /* waiting until all indicators are set */
6801                         if ((reg & ALL_FROZE) == ALL_FROZE)
6802                                 return; /* all done */
6803                 } else {
6804                         /* waiting until all indicators are clear */
6805                         if ((reg & ALL_FROZE) == 0)
6806                                 return; /* all done */
6807                 }
6808
6809                 if (time_after(jiffies, timeout)) {
6810                         dd_dev_err(dd,
6811                                    "Time out waiting for SPC %sfreeze, bits 0x%llx, expecting 0x%llx, continuing",
6812                                    freeze ? "" : "un", reg & ALL_FROZE,
6813                                    freeze ? ALL_FROZE : 0ull);
6814                         return;
6815                 }
6816                 usleep_range(80, 120);
6817         }
6818 }
6819
6820 /*
6821  * Do all freeze handling for the RXE block.
6822  */
6823 static void rxe_freeze(struct hfi1_devdata *dd)
6824 {
6825         int i;
6826         struct hfi1_ctxtdata *rcd;
6827
6828         /* disable port */
6829         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6830
6831         /* disable all receive contexts */
6832         for (i = 0; i < dd->num_rcv_contexts; i++) {
6833                 rcd = hfi1_rcd_get_by_index(dd, i);
6834                 hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS, rcd);
6835                 hfi1_rcd_put(rcd);
6836         }
6837 }
6838
6839 /*
6840  * Unfreeze handling for the RXE block - kernel contexts only.
6841  * This will also enable the port.  User contexts will do unfreeze
6842  * handling on a per-context basis as they call into the driver.
6843  *
6844  */
6845 static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
6846 {
6847         u32 rcvmask;
6848         u16 i;
6849         struct hfi1_ctxtdata *rcd;
6850
6851         /* enable all kernel contexts */
6852         for (i = 0; i < dd->num_rcv_contexts; i++) {
6853                 rcd = hfi1_rcd_get_by_index(dd, i);
6854
6855                 /* Ensure all non-user contexts (including vnic) are enabled */
6856                 if (!rcd ||
6857                     (i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
6858                         hfi1_rcd_put(rcd);
6859                         continue;
6860                 }
6861                 rcvmask = HFI1_RCVCTRL_CTXT_ENB;
6862                 /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
6863                 rcvmask |= rcd->rcvhdrtail_kvaddr ?
6864                         HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
6865                 hfi1_rcvctrl(dd, rcvmask, rcd);
6866                 hfi1_rcd_put(rcd);
6867         }
6868
6869         /* enable port */
6870         add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
6871 }
6872
6873 /*
6874  * Non-interrupt SPC freeze handling.
6875  *
6876  * This is a work-queue function outside of the triggering interrupt.
6877  */
6878 void handle_freeze(struct work_struct *work)
6879 {
6880         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6881                                                                 freeze_work);
6882         struct hfi1_devdata *dd = ppd->dd;
6883
6884         /* wait for freeze indicators on all affected blocks */
6885         wait_for_freeze_status(dd, 1);
6886
6887         /* SPC is now frozen */
6888
6889         /* do send PIO freeze steps */
6890         pio_freeze(dd);
6891
6892         /* do send DMA freeze steps */
6893         sdma_freeze(dd);
6894
6895         /* do send egress freeze steps - nothing to do */
6896
6897         /* do receive freeze steps */
6898         rxe_freeze(dd);
6899
6900         /*
6901          * Unfreeze the hardware - clear the freeze, wait for each
6902          * block's frozen bit to clear, then clear the frozen flag.
6903          */
6904         write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6905         wait_for_freeze_status(dd, 0);
6906
6907         if (is_ax(dd)) {
6908                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
6909                 wait_for_freeze_status(dd, 1);
6910                 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_UNFREEZE_SMASK);
6911                 wait_for_freeze_status(dd, 0);
6912         }
6913
6914         /* do send PIO unfreeze steps for kernel contexts */
6915         pio_kernel_unfreeze(dd);
6916
6917         /* do send DMA unfreeze steps */
6918         sdma_unfreeze(dd);
6919
6920         /* do send egress unfreeze steps - nothing to do */
6921
6922         /* do receive unfreeze steps for kernel contexts */
6923         rxe_kernel_unfreeze(dd);
6924
6925         /*
6926          * The unfreeze procedure touches global device registers when
6927          * it disables and re-enables RXE. Mark the device unfrozen
6928          * after all that is done so other parts of the driver waiting
6929          * for the device to unfreeze don't do things out of order.
6930          *
6931          * The above implies that the meaning of the HFI1_FROZEN flag is
6932          * "Device has gone into freeze mode and freeze mode handling
6933          * is still in progress."
6934          *
6935          * The flag will be removed when freeze mode processing has
6936          * completed.
6937          */
6938         dd->flags &= ~HFI1_FROZEN;
6939         wake_up(&dd->event_queue);
6940
6941         /* no longer frozen */
6942 }
6943
6944 /**
6945  * update_xmit_counters - update PortXmitWait/PortVlXmitWait
6946  * counters.
6947  * @ppd: info of physical Hfi port
6948  * @link_width: new link width after link up or downgrade
6949  *
6950  * Update the PortXmitWait and PortVlXmitWait counters after
6951  * a link up or downgrade event to reflect a link width change.
6952  */
6953 static void update_xmit_counters(struct hfi1_pportdata *ppd, u16 link_width)
6954 {
6955         int i;
6956         u16 tx_width;
6957         u16 link_speed;
6958
6959         tx_width = tx_link_width(link_width);
6960         link_speed = get_link_speed(ppd->link_speed_active);
6961
6962         /*
6963          * There are C_VL_COUNT number of PortVLXmitWait counters.
6964          * Adding 1 to C_VL_COUNT to include the PortXmitWait counter.
6965          */
6966         for (i = 0; i < C_VL_COUNT + 1; i++)
6967                 get_xmit_wait_counters(ppd, tx_width, link_speed, i);
6968 }
6969
6970 /*
6971  * Handle a link up interrupt from the 8051.
6972  *
6973  * This is a work-queue function outside of the interrupt.
6974  */
6975 void handle_link_up(struct work_struct *work)
6976 {
6977         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
6978                                                   link_up_work);
6979         struct hfi1_devdata *dd = ppd->dd;
6980
6981         set_link_state(ppd, HLS_UP_INIT);
6982
6983         /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
6984         read_ltp_rtt(dd);
6985         /*
6986          * OPA specifies that certain counters are cleared on a transition
6987          * to link up, so do that.
6988          */
6989         clear_linkup_counters(dd);
6990         /*
6991          * And (re)set link up default values.
6992          */
6993         set_linkup_defaults(ppd);
6994
6995         /*
6996          * Set VL15 credits. Use cached value from verify cap interrupt.
6997          * In case of quick linkup or simulator, vl15 value will be set by
6998          * handle_linkup_change. VerifyCap interrupt handler will not be
6999          * called in those scenarios.
7000          */
7001         if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
7002                 set_up_vl15(dd, dd->vl15buf_cached);
7003
7004         /* enforce link speed enabled */
7005         if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
7006                 /* oops - current speed is not enabled, bounce */
7007                 dd_dev_err(dd,
7008                            "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
7009                            ppd->link_speed_active, ppd->link_speed_enabled);
7010                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
7011                                      OPA_LINKDOWN_REASON_SPEED_POLICY);
7012                 set_link_state(ppd, HLS_DN_OFFLINE);
7013                 start_link(ppd);
7014         }
7015 }
7016
7017 /*
7018  * Several pieces of LNI information were cached for SMA in ppd.
7019  * Reset these on link down.
7020  */
7021 static void reset_neighbor_info(struct hfi1_pportdata *ppd)
7022 {
7023         ppd->neighbor_guid = 0;
7024         ppd->neighbor_port_number = 0;
7025         ppd->neighbor_type = 0;
7026         ppd->neighbor_fm_security = 0;
7027 }
7028
7029 static const char * const link_down_reason_strs[] = {
7030         [OPA_LINKDOWN_REASON_NONE] = "None",
7031         [OPA_LINKDOWN_REASON_RCV_ERROR_0] = "Receive error 0",
7032         [OPA_LINKDOWN_REASON_BAD_PKT_LEN] = "Bad packet length",
7033         [OPA_LINKDOWN_REASON_PKT_TOO_LONG] = "Packet too long",
7034         [OPA_LINKDOWN_REASON_PKT_TOO_SHORT] = "Packet too short",
7035         [OPA_LINKDOWN_REASON_BAD_SLID] = "Bad SLID",
7036         [OPA_LINKDOWN_REASON_BAD_DLID] = "Bad DLID",
7037         [OPA_LINKDOWN_REASON_BAD_L2] = "Bad L2",
7038         [OPA_LINKDOWN_REASON_BAD_SC] = "Bad SC",
7039         [OPA_LINKDOWN_REASON_RCV_ERROR_8] = "Receive error 8",
7040         [OPA_LINKDOWN_REASON_BAD_MID_TAIL] = "Bad mid tail",
7041         [OPA_LINKDOWN_REASON_RCV_ERROR_10] = "Receive error 10",
7042         [OPA_LINKDOWN_REASON_PREEMPT_ERROR] = "Preempt error",
7043         [OPA_LINKDOWN_REASON_PREEMPT_VL15] = "Preempt vl15",
7044         [OPA_LINKDOWN_REASON_BAD_VL_MARKER] = "Bad VL marker",
7045         [OPA_LINKDOWN_REASON_RCV_ERROR_14] = "Receive error 14",
7046         [OPA_LINKDOWN_REASON_RCV_ERROR_15] = "Receive error 15",
7047         [OPA_LINKDOWN_REASON_BAD_HEAD_DIST] = "Bad head distance",
7048         [OPA_LINKDOWN_REASON_BAD_TAIL_DIST] = "Bad tail distance",
7049         [OPA_LINKDOWN_REASON_BAD_CTRL_DIST] = "Bad control distance",
7050         [OPA_LINKDOWN_REASON_BAD_CREDIT_ACK] = "Bad credit ack",
7051         [OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER] = "Unsupported VL marker",
7052         [OPA_LINKDOWN_REASON_BAD_PREEMPT] = "Bad preempt",
7053         [OPA_LINKDOWN_REASON_BAD_CONTROL_FLIT] = "Bad control flit",
7054         [OPA_LINKDOWN_REASON_EXCEED_MULTICAST_LIMIT] = "Exceed multicast limit",
7055         [OPA_LINKDOWN_REASON_RCV_ERROR_24] = "Receive error 24",
7056         [OPA_LINKDOWN_REASON_RCV_ERROR_25] = "Receive error 25",
7057         [OPA_LINKDOWN_REASON_RCV_ERROR_26] = "Receive error 26",
7058         [OPA_LINKDOWN_REASON_RCV_ERROR_27] = "Receive error 27",
7059         [OPA_LINKDOWN_REASON_RCV_ERROR_28] = "Receive error 28",
7060         [OPA_LINKDOWN_REASON_RCV_ERROR_29] = "Receive error 29",
7061         [OPA_LINKDOWN_REASON_RCV_ERROR_30] = "Receive error 30",
7062         [OPA_LINKDOWN_REASON_EXCESSIVE_BUFFER_OVERRUN] =
7063                                         "Excessive buffer overrun",
7064         [OPA_LINKDOWN_REASON_UNKNOWN] = "Unknown",
7065         [OPA_LINKDOWN_REASON_REBOOT] = "Reboot",
7066         [OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN] = "Neighbor unknown",
7067         [OPA_LINKDOWN_REASON_FM_BOUNCE] = "FM bounce",
7068         [OPA_LINKDOWN_REASON_SPEED_POLICY] = "Speed policy",
7069         [OPA_LINKDOWN_REASON_WIDTH_POLICY] = "Width policy",
7070         [OPA_LINKDOWN_REASON_DISCONNECTED] = "Disconnected",
7071         [OPA_LINKDOWN_REASON_LOCAL_MEDIA_NOT_INSTALLED] =
7072                                         "Local media not installed",
7073         [OPA_LINKDOWN_REASON_NOT_INSTALLED] = "Not installed",
7074         [OPA_LINKDOWN_REASON_CHASSIS_CONFIG] = "Chassis config",
7075         [OPA_LINKDOWN_REASON_END_TO_END_NOT_INSTALLED] =
7076                                         "End to end not installed",
7077         [OPA_LINKDOWN_REASON_POWER_POLICY] = "Power policy",
7078         [OPA_LINKDOWN_REASON_LINKSPEED_POLICY] = "Link speed policy",
7079         [OPA_LINKDOWN_REASON_LINKWIDTH_POLICY] = "Link width policy",
7080         [OPA_LINKDOWN_REASON_SWITCH_MGMT] = "Switch management",
7081         [OPA_LINKDOWN_REASON_SMA_DISABLED] = "SMA disabled",
7082         [OPA_LINKDOWN_REASON_TRANSIENT] = "Transient"
7083 };
7084
7085 /* return the neighbor link down reason string */
7086 static const char *link_down_reason_str(u8 reason)
7087 {
7088         const char *str = NULL;
7089
7090         if (reason < ARRAY_SIZE(link_down_reason_strs))
7091                 str = link_down_reason_strs[reason];
7092         if (!str)
7093                 str = "(invalid)";
7094
7095         return str;
7096 }
7097
7098 /*
7099  * Handle a link down interrupt from the 8051.
7100  *
7101  * This is a work-queue function outside of the interrupt.
7102  */
7103 void handle_link_down(struct work_struct *work)
7104 {
7105         u8 lcl_reason, neigh_reason = 0;
7106         u8 link_down_reason;
7107         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7108                                                   link_down_work);
7109         int was_up;
7110         static const char ldr_str[] = "Link down reason: ";
7111
7112         if ((ppd->host_link_state &
7113              (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) &&
7114              ppd->port_type == PORT_TYPE_FIXED)
7115                 ppd->offline_disabled_reason =
7116                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NOT_INSTALLED);
7117
7118         /* Go offline first, then deal with reading/writing through 8051 */
7119         was_up = !!(ppd->host_link_state & HLS_UP);
7120         set_link_state(ppd, HLS_DN_OFFLINE);
7121         xchg(&ppd->is_link_down_queued, 0);
7122
7123         if (was_up) {
7124                 lcl_reason = 0;
7125                 /* link down reason is only valid if the link was up */
7126                 read_link_down_reason(ppd->dd, &link_down_reason);
7127                 switch (link_down_reason) {
7128                 case LDR_LINK_TRANSFER_ACTIVE_LOW:
7129                         /* the link went down, no idle message reason */
7130                         dd_dev_info(ppd->dd, "%sUnexpected link down\n",
7131                                     ldr_str);
7132                         break;
7133                 case LDR_RECEIVED_LINKDOWN_IDLE_MSG:
7134                         /*
7135                          * The neighbor reason is only valid if an idle message
7136                          * was received for it.
7137                          */
7138                         read_planned_down_reason_code(ppd->dd, &neigh_reason);
7139                         dd_dev_info(ppd->dd,
7140                                     "%sNeighbor link down message %d, %s\n",
7141                                     ldr_str, neigh_reason,
7142                                     link_down_reason_str(neigh_reason));
7143                         break;
7144                 case LDR_RECEIVED_HOST_OFFLINE_REQ:
7145                         dd_dev_info(ppd->dd,
7146                                     "%sHost requested link to go offline\n",
7147                                     ldr_str);
7148                         break;
7149                 default:
7150                         dd_dev_info(ppd->dd, "%sUnknown reason 0x%x\n",
7151                                     ldr_str, link_down_reason);
7152                         break;
7153                 }
7154
7155                 /*
7156                  * If no reason, assume peer-initiated but missed
7157                  * LinkGoingDown idle flits.
7158                  */
7159                 if (neigh_reason == 0)
7160                         lcl_reason = OPA_LINKDOWN_REASON_NEIGHBOR_UNKNOWN;
7161         } else {
7162                 /* went down while polling or going up */
7163                 lcl_reason = OPA_LINKDOWN_REASON_TRANSIENT;
7164         }
7165
7166         set_link_down_reason(ppd, lcl_reason, neigh_reason, 0);
7167
7168         /* inform the SMA when the link transitions from up to down */
7169         if (was_up && ppd->local_link_down_reason.sma == 0 &&
7170             ppd->neigh_link_down_reason.sma == 0) {
7171                 ppd->local_link_down_reason.sma =
7172                                         ppd->local_link_down_reason.latest;
7173                 ppd->neigh_link_down_reason.sma =
7174                                         ppd->neigh_link_down_reason.latest;
7175         }
7176
7177         reset_neighbor_info(ppd);
7178
7179         /* disable the port */
7180         clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
7181
7182         /*
7183          * If there is no cable attached, turn the DC off. Otherwise,
7184          * start the link bring up.
7185          */
7186         if (ppd->port_type == PORT_TYPE_QSFP && !qsfp_mod_present(ppd))
7187                 dc_shutdown(ppd->dd);
7188         else
7189                 start_link(ppd);
7190 }
7191
7192 void handle_link_bounce(struct work_struct *work)
7193 {
7194         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7195                                                         link_bounce_work);
7196
7197         /*
7198          * Only do something if the link is currently up.
7199          */
7200         if (ppd->host_link_state & HLS_UP) {
7201                 set_link_state(ppd, HLS_DN_OFFLINE);
7202                 start_link(ppd);
7203         } else {
7204                 dd_dev_info(ppd->dd, "%s: link not up (%s), nothing to do\n",
7205                             __func__, link_state_name(ppd->host_link_state));
7206         }
7207 }
7208
7209 /*
7210  * Mask conversion: Capability exchange to Port LTP.  The capability
7211  * exchange has an implicit 16b CRC that is mandatory.
7212  */
7213 static int cap_to_port_ltp(int cap)
7214 {
7215         int port_ltp = PORT_LTP_CRC_MODE_16; /* this mode is mandatory */
7216
7217         if (cap & CAP_CRC_14B)
7218                 port_ltp |= PORT_LTP_CRC_MODE_14;
7219         if (cap & CAP_CRC_48B)
7220                 port_ltp |= PORT_LTP_CRC_MODE_48;
7221         if (cap & CAP_CRC_12B_16B_PER_LANE)
7222                 port_ltp |= PORT_LTP_CRC_MODE_PER_LANE;
7223
7224         return port_ltp;
7225 }
7226
7227 /*
7228  * Convert an OPA Port LTP mask to capability mask
7229  */
7230 int port_ltp_to_cap(int port_ltp)
7231 {
7232         int cap_mask = 0;
7233
7234         if (port_ltp & PORT_LTP_CRC_MODE_14)
7235                 cap_mask |= CAP_CRC_14B;
7236         if (port_ltp & PORT_LTP_CRC_MODE_48)
7237                 cap_mask |= CAP_CRC_48B;
7238         if (port_ltp & PORT_LTP_CRC_MODE_PER_LANE)
7239                 cap_mask |= CAP_CRC_12B_16B_PER_LANE;
7240
7241         return cap_mask;
7242 }
7243
7244 /*
7245  * Convert a single DC LCB CRC mode to an OPA Port LTP mask.
7246  */
7247 static int lcb_to_port_ltp(int lcb_crc)
7248 {
7249         int port_ltp = 0;
7250
7251         if (lcb_crc == LCB_CRC_12B_16B_PER_LANE)
7252                 port_ltp = PORT_LTP_CRC_MODE_PER_LANE;
7253         else if (lcb_crc == LCB_CRC_48B)
7254                 port_ltp = PORT_LTP_CRC_MODE_48;
7255         else if (lcb_crc == LCB_CRC_14B)
7256                 port_ltp = PORT_LTP_CRC_MODE_14;
7257         else
7258                 port_ltp = PORT_LTP_CRC_MODE_16;
7259
7260         return port_ltp;
7261 }
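
/*
 * Worked example for the conversions above (hypothetical mask): a
 * capability mask of (CAP_CRC_14B | CAP_CRC_48B) maps through
 * cap_to_port_ltp() to (PORT_LTP_CRC_MODE_16 | PORT_LTP_CRC_MODE_14 |
 * PORT_LTP_CRC_MODE_48), since the 16b mode is always implied.  Converting
 * that LTP mask back with port_ltp_to_cap() yields only
 * (CAP_CRC_14B | CAP_CRC_48B) - the mandatory 16b mode has no capability
 * bit of its own.
 */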
7262
7263 static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
7264 {
7265         if (ppd->pkeys[2] != 0) {
7266                 ppd->pkeys[2] = 0;
7267                 (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
7268                 hfi1_event_pkey_change(ppd->dd, ppd->port);
7269         }
7270 }
7271
7272 /*
7273  * Convert the given link width to the OPA link width bitmask.
7274  */
7275 static u16 link_width_to_bits(struct hfi1_devdata *dd, u16 width)
7276 {
7277         switch (width) {
7278         case 0:
7279                 /*
7280                  * Simulator and quick linkup do not set the width.
7281                  * Just set it to 4x without complaint.
7282                  */
7283                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR || quick_linkup)
7284                         return OPA_LINK_WIDTH_4X;
7285                 return 0; /* no lanes up */
7286         case 1: return OPA_LINK_WIDTH_1X;
7287         case 2: return OPA_LINK_WIDTH_2X;
7288         case 3: return OPA_LINK_WIDTH_3X;
7289         default:
7290                 dd_dev_info(dd, "%s: invalid width %d, using 4\n",
7291                             __func__, width);
7292                 /* fall through */
7293         case 4: return OPA_LINK_WIDTH_4X;
7294         }
7295 }
7296
7297 /*
7298  * Do a population count on the bottom nibble.
7299  */
7300 static const u8 bit_counts[16] = {
7301         0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
7302 };
7303
7304 static inline u8 nibble_to_count(u8 nibble)
7305 {
7306         return bit_counts[nibble & 0xf];
7307 }
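
/*
 * Example (illustrative): nibble_to_count(0xb) looks at binary 1011 and
 * returns 3, i.e. three active lanes - a 4-bit population count via the
 * lookup table above.
 */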
7308
7309 /*
7310  * Read the active lane information from the 8051 registers and return
7311  * their widths.
7312  *
7313  * Active lane information is found in these 8051 registers:
7314  *      enable_lane_tx
7315  *      enable_lane_rx
7316  */
7317 static void get_link_widths(struct hfi1_devdata *dd, u16 *tx_width,
7318                             u16 *rx_width)
7319 {
7320         u16 tx, rx;
7321         u8 enable_lane_rx;
7322         u8 enable_lane_tx;
7323         u8 tx_polarity_inversion;
7324         u8 rx_polarity_inversion;
7325         u8 max_rate;
7326
7327         /* read the active lanes */
7328         read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
7329                          &rx_polarity_inversion, &max_rate);
7330         read_local_lni(dd, &enable_lane_rx);
7331
7332         /* convert to counts */
7333         tx = nibble_to_count(enable_lane_tx);
7334         rx = nibble_to_count(enable_lane_rx);
7335
7336         /*
7337          * Set link_speed_active here, overriding what was set in
7338          * handle_verify_cap().  The ASIC 8051 firmware does not correctly
7339          * set the max_rate field in handle_verify_cap until v0.19.
7340          */
7341         if ((dd->icode == ICODE_RTL_SILICON) &&
7342             (dd->dc8051_ver < dc8051_ver(0, 19, 0))) {
7343                 /* max_rate: 0 = 12.5G, 1 = 25G */
7344                 switch (max_rate) {
7345                 case 0:
7346                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_12_5G;
7347                         break;
7348                 default:
7349                         dd_dev_err(dd,
7350                                    "%s: unexpected max rate %d, using 25Gb\n",
7351                                    __func__, (int)max_rate);
7352                         /* fall through */
7353                 case 1:
7354                         dd->pport[0].link_speed_active = OPA_LINK_SPEED_25G;
7355                         break;
7356                 }
7357         }
7358
7359         dd_dev_info(dd,
7360                     "Fabric active lanes (width): tx 0x%x (%d), rx 0x%x (%d)\n",
7361                     enable_lane_tx, tx, enable_lane_rx, rx);
7362         *tx_width = link_width_to_bits(dd, tx);
7363         *rx_width = link_width_to_bits(dd, rx);
7364 }
7365
7366 /*
7367  * Read verify_cap_local_fm_link_width[1] to obtain the link widths.
7368  * Valid after the end of VerifyCap and during LinkUp.  Does not change
7369  * after link up.  I.e. look elsewhere for downgrade information.
7370  *
7371  * Bits are:
7372  *      + bits [7:4] contain the number of active transmitters
7373  *      + bits [3:0] contain the number of active receivers
7374  * These are numbers 1 through 4 and can be different values if the
7375  * link is asymmetric.
7376  *
7377  * verify_cap_local_fm_link_width[0] retains its original value.
7378  */
7379 static void get_linkup_widths(struct hfi1_devdata *dd, u16 *tx_width,
7380                               u16 *rx_width)
7381 {
7382         u16 widths, tx, rx;
7383         u8 misc_bits, local_flags;
7384         u16 active_tx, active_rx;
7385
7386         read_vc_local_link_mode(dd, &misc_bits, &local_flags, &widths);
7387         tx = widths >> 12;
7388         rx = (widths >> 8) & 0xf;
7389
7390         *tx_width = link_width_to_bits(dd, tx);
7391         *rx_width = link_width_to_bits(dd, rx);
7392
7393         /* print the active widths */
7394         get_link_widths(dd, &active_tx, &active_rx);
7395 }
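
/*
 * Worked example for the decode above (hypothetical register value): with
 * widths == 0x4300, byte [1] is 0x43, so tx = 4 active transmitters and
 * rx = 3 active receivers, which link_width_to_bits() reports as
 * OPA_LINK_WIDTH_4X and OPA_LINK_WIDTH_3X respectively.
 */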
7396
7397 /*
7398  * Set ppd->link_width_active and ppd->link_width_downgrade_active using
7399  * hardware information when the link first comes up.
7400  *
7401  * The link width is not available until after VerifyCap.AllFramesReceived
7402  * (the trigger for handle_verify_cap), so this is outside that routine
7403  * and should be called when the 8051 signals linkup.
7404  */
7405 void get_linkup_link_widths(struct hfi1_pportdata *ppd)
7406 {
7407         u16 tx_width, rx_width;
7408
7409         /* get end-of-LNI link widths */
7410         get_linkup_widths(ppd->dd, &tx_width, &rx_width);
7411
7412         /* use tx_width as the link is supposed to be symmetric on link up */
7413         ppd->link_width_active = tx_width;
7414         /* link width downgrade active (LWD.A) starts out matching LW.A */
7415         ppd->link_width_downgrade_tx_active = ppd->link_width_active;
7416         ppd->link_width_downgrade_rx_active = ppd->link_width_active;
7417         /* per OPA spec, on link up LWD.E resets to LWD.S */
7418         ppd->link_width_downgrade_enabled = ppd->link_width_downgrade_supported;
7419         /* cache the active egress rate (units [10^6 bits/sec]) */
7420         ppd->current_egress_rate = active_egress_rate(ppd);
7421 }
7422
7423 /*
7424  * Handle a verify capabilities interrupt from the 8051.
7425  *
7426  * This is a work-queue function outside of the interrupt.
7427  */
7428 void handle_verify_cap(struct work_struct *work)
7429 {
7430         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7431                                                                 link_vc_work);
7432         struct hfi1_devdata *dd = ppd->dd;
7433         u64 reg;
7434         u8 power_management;
7435         u8 continuous;
7436         u8 vcu;
7437         u8 vau;
7438         u8 z;
7439         u16 vl15buf;
7440         u16 link_widths;
7441         u16 crc_mask;
7442         u16 crc_val;
7443         u16 device_id;
7444         u16 active_tx, active_rx;
7445         u8 partner_supported_crc;
7446         u8 remote_tx_rate;
7447         u8 device_rev;
7448
7449         set_link_state(ppd, HLS_VERIFY_CAP);
7450
7451         lcb_shutdown(dd, 0);
7452         adjust_lcb_for_fpga_serdes(dd);
7453
7454         read_vc_remote_phy(dd, &power_management, &continuous);
7455         read_vc_remote_fabric(dd, &vau, &z, &vcu, &vl15buf,
7456                               &partner_supported_crc);
7457         read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
7458         read_remote_device_id(dd, &device_id, &device_rev);
7459
7460         /* print the active widths */
7461         get_link_widths(dd, &active_tx, &active_rx);
7462         dd_dev_info(dd,
7463                     "Peer PHY: power management 0x%x, continuous updates 0x%x\n",
7464                     (int)power_management, (int)continuous);
7465         dd_dev_info(dd,
7466                     "Peer Fabric: vAU %d, Z %d, vCU %d, vl15 credits 0x%x, CRC sizes 0x%x\n",
7467                     (int)vau, (int)z, (int)vcu, (int)vl15buf,
7468                     (int)partner_supported_crc);
7469         dd_dev_info(dd, "Peer Link Width: tx rate 0x%x, widths 0x%x\n",
7470                     (u32)remote_tx_rate, (u32)link_widths);
7471         dd_dev_info(dd, "Peer Device ID: 0x%04x, Revision 0x%02x\n",
7472                     (u32)device_id, (u32)device_rev);
7473         /*
7474          * The peer vAU value just read is the peer receiver value.  HFI does
7475          * not support a transmit vAU of 0 (AU == 8).  We advertised that
7476          * with Z=1 in the fabric capabilities sent to the peer.  The peer
7477          * will see our Z=1, and, if it advertised a vAU of 0, will move its
7478          * receive to vAU of 1 (AU == 16).  Do the same here.  We do not care
7479          * about the peer Z value - our sent vAU is 3 (hardwired) and is not
7480          * subject to the Z value exception.
7481          */
7482         if (vau == 0)
7483                 vau = 1;
7484         set_up_vau(dd, vau);
7485
7486         /*
7487          * Set VL15 credits to 0 in global credit register. Cache remote VL15
7488          * credits value and wait for the link-up interrupt to set it.
7489          */
7490         set_up_vl15(dd, 0);
7491         dd->vl15buf_cached = vl15buf;
7492
7493         /* set up the LCB CRC mode */
7494         crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
7495
7496         /* order is important: use the lowest bit in common */
7497         if (crc_mask & CAP_CRC_14B)
7498                 crc_val = LCB_CRC_14B;
7499         else if (crc_mask & CAP_CRC_48B)
7500                 crc_val = LCB_CRC_48B;
7501         else if (crc_mask & CAP_CRC_12B_16B_PER_LANE)
7502                 crc_val = LCB_CRC_12B_16B_PER_LANE;
7503         else
7504                 crc_val = LCB_CRC_16B;
7505
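        /*
         * Example (hypothetical masks): if the local side enabled 14B and
         * 48B but the peer only supports 48B and per-lane CRC, the common
         * mask has no 14B bit, so the chain above selects LCB_CRC_48B.
         */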
7506         dd_dev_info(dd, "Final LCB CRC mode: %d\n", (int)crc_val);
7507         write_csr(dd, DC_LCB_CFG_CRC_MODE,
7508                   (u64)crc_val << DC_LCB_CFG_CRC_MODE_TX_VAL_SHIFT);
7509
7510         /* set (14b only) or clear sideband credit */
7511         reg = read_csr(dd, SEND_CM_CTRL);
7512         if (crc_val == LCB_CRC_14B && crc_14b_sideband) {
7513                 write_csr(dd, SEND_CM_CTRL,
7514                           reg | SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7515         } else {
7516                 write_csr(dd, SEND_CM_CTRL,
7517                           reg & ~SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK);
7518         }
7519
7520         ppd->link_speed_active = 0;     /* invalid value */
7521         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
7522                 /* remote_tx_rate: 0 = 12.5G, 1 = 25G */
7523                 switch (remote_tx_rate) {
7524                 case 0:
7525                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7526                         break;
7527                 case 1:
7528                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7529                         break;
7530                 }
7531         } else {
7532                 /* actual rate is highest bit of the ANDed rates */
7533                 u8 rate = remote_tx_rate & ppd->local_tx_rate;
7534
7535                 if (rate & 2)
7536                         ppd->link_speed_active = OPA_LINK_SPEED_25G;
7537                 else if (rate & 1)
7538                         ppd->link_speed_active = OPA_LINK_SPEED_12_5G;
7539         }
7540         if (ppd->link_speed_active == 0) {
7541                 dd_dev_err(dd, "%s: unexpected remote tx rate %d, using 25Gb\n",
7542                            __func__, (int)remote_tx_rate);
7543                 ppd->link_speed_active = OPA_LINK_SPEED_25G;
7544         }
7545
7546         /*
7547          * Cache the values of the supported, enabled, and active
7548          * LTP CRC modes to return in 'portinfo' queries. But the bit
7549          * flags that are returned in the portinfo query differ from
7550          * what's in the link_crc_mask, crc_sizes, and crc_val
7551          * variables. Convert these here.
7552          */
7553         ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
7554                 /* supported crc modes */
7555         ppd->port_ltp_crc_mode |=
7556                 cap_to_port_ltp(ppd->port_crc_mode_enabled) << 4;
7557                 /* enabled crc modes */
7558         ppd->port_ltp_crc_mode |= lcb_to_port_ltp(crc_val);
7559                 /* active crc mode */
7560
7561         /* set up the remote credit return table */
7562         assign_remote_cm_au_table(dd, vcu);
7563
7564         /*
7565          * The LCB is reset on entry to handle_verify_cap(), so this must
7566          * be applied on every link up.
7567          *
7568          * Adjust LCB error kill enable to kill the link if
7569          * these RBUF errors are seen:
7570          *      REPLAY_BUF_MBE_SMASK
7571          *      FLIT_INPUT_BUF_MBE_SMASK
7572          */
7573         if (is_ax(dd)) {                        /* fixed in B0 */
7574                 reg = read_csr(dd, DC_LCB_CFG_LINK_KILL_EN);
7575                 reg |= DC_LCB_CFG_LINK_KILL_EN_REPLAY_BUF_MBE_SMASK
7576                         | DC_LCB_CFG_LINK_KILL_EN_FLIT_INPUT_BUF_MBE_SMASK;
7577                 write_csr(dd, DC_LCB_CFG_LINK_KILL_EN, reg);
7578         }
7579
7580         /* pull LCB fifos out of reset - all fifo clocks must be stable */
7581         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
7582
7583         /* give 8051 access to the LCB CSRs */
7584         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
7585         set_8051_lcb_access(dd);
7586
7587         /* tell the 8051 to go to LinkUp */
7588         set_link_state(ppd, HLS_GOING_UP);
7589 }
7590
7591 /**
7592  * apply_link_downgrade_policy - Apply the link width downgrade enabled
7593  * policy against the current active link widths.
7594  * @ppd: info of physical Hfi port
7595  * @refresh_widths: True indicates a link downgrade event
7596  * @return: True indicates a successful link downgrade. False indicates
7597  *          that the link downgrade event failed and the link will bounce
7598  *          back to the default link width.
7599  *
7600  * Called when the enabled policy changes or the active link widths
7601  * change.
7602  * Refresh_widths indicates that a link downgrade occurred. The
7603  * link_downgraded variable is set by refresh_widths and
7604  * determines the success/failure of the policy application.
7605  */
7606 bool apply_link_downgrade_policy(struct hfi1_pportdata *ppd,
7607                                  bool refresh_widths)
7608 {
7609         int do_bounce = 0;
7610         int tries;
7611         u16 lwde;
7612         u16 tx, rx;
7613         bool link_downgraded = refresh_widths;
7614
7615         /* use the hls lock to avoid a race with actual link up */
7616         tries = 0;
7617 retry:
7618         mutex_lock(&ppd->hls_lock);
7619         /* only apply if the link is up */
7620         if (ppd->host_link_state & HLS_DOWN) {
7621                 /* still going up; wait and retry */
7622                 if (ppd->host_link_state & HLS_GOING_UP) {
7623                         if (++tries < 1000) {
7624                                 mutex_unlock(&ppd->hls_lock);
7625                                 usleep_range(100, 120); /* arbitrary */
7626                                 goto retry;
7627                         }
7628                         dd_dev_err(ppd->dd,
7629                                    "%s: giving up waiting for link state change\n",
7630                                    __func__);
7631                 }
7632                 goto done;
7633         }
7634
7635         lwde = ppd->link_width_downgrade_enabled;
7636
7637         if (refresh_widths) {
7638                 get_link_widths(ppd->dd, &tx, &rx);
7639                 ppd->link_width_downgrade_tx_active = tx;
7640                 ppd->link_width_downgrade_rx_active = rx;
7641         }
7642
7643         if (ppd->link_width_downgrade_tx_active == 0 ||
7644             ppd->link_width_downgrade_rx_active == 0) {
7645                 /* the 8051 reported a dead link as a downgrade */
7646                 dd_dev_err(ppd->dd, "Link downgrade is really a link down, ignoring\n");
7647                 link_downgraded = false;
7648         } else if (lwde == 0) {
7649                 /* downgrade is disabled */
7650
7651                 /* bounce if not at starting active width */
7652                 if ((ppd->link_width_active !=
7653                      ppd->link_width_downgrade_tx_active) ||
7654                     (ppd->link_width_active !=
7655                      ppd->link_width_downgrade_rx_active)) {
7656                         dd_dev_err(ppd->dd,
7657                                    "Link downgrade is disabled and link has downgraded, downing link\n");
7658                         dd_dev_err(ppd->dd,
7659                                    "  original 0x%x, tx active 0x%x, rx active 0x%x\n",
7660                                    ppd->link_width_active,
7661                                    ppd->link_width_downgrade_tx_active,
7662                                    ppd->link_width_downgrade_rx_active);
7663                         do_bounce = 1;
7664                         link_downgraded = false;
7665                 }
7666         } else if ((lwde & ppd->link_width_downgrade_tx_active) == 0 ||
7667                    (lwde & ppd->link_width_downgrade_rx_active) == 0) {
7668                 /* Tx or Rx is outside the enabled policy */
7669                 dd_dev_err(ppd->dd,
7670                            "Link is outside of downgrade allowed, downing link\n");
7671                 dd_dev_err(ppd->dd,
7672                            "  enabled 0x%x, tx active 0x%x, rx active 0x%x\n",
7673                            lwde, ppd->link_width_downgrade_tx_active,
7674                            ppd->link_width_downgrade_rx_active);
7675                 do_bounce = 1;
7676                 link_downgraded = false;
7677         }
7678
7679 done:
7680         mutex_unlock(&ppd->hls_lock);
7681
7682         if (do_bounce) {
7683                 set_link_down_reason(ppd, OPA_LINKDOWN_REASON_WIDTH_POLICY, 0,
7684                                      OPA_LINKDOWN_REASON_WIDTH_POLICY);
7685                 set_link_state(ppd, HLS_DN_OFFLINE);
7686                 start_link(ppd);
7687         }
7688
7689         return link_downgraded;
7690 }
7691
7692 /*
7693  * Handle a link downgrade interrupt from the 8051.
7694  *
7695  * This is a work-queue function outside of the interrupt.
7696  */
7697 void handle_link_downgrade(struct work_struct *work)
7698 {
7699         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
7700                                                         link_downgrade_work);
7701
7702         dd_dev_info(ppd->dd, "8051: Link width downgrade\n");
7703         if (apply_link_downgrade_policy(ppd, true))
7704                 update_xmit_counters(ppd, ppd->link_width_downgrade_tx_active);
7705 }
7706
7707 static char *dcc_err_string(char *buf, int buf_len, u64 flags)
7708 {
7709         return flag_string(buf, buf_len, flags, dcc_err_flags,
7710                 ARRAY_SIZE(dcc_err_flags));
7711 }
7712
7713 static char *lcb_err_string(char *buf, int buf_len, u64 flags)
7714 {
7715         return flag_string(buf, buf_len, flags, lcb_err_flags,
7716                 ARRAY_SIZE(lcb_err_flags));
7717 }
7718
7719 static char *dc8051_err_string(char *buf, int buf_len, u64 flags)
7720 {
7721         return flag_string(buf, buf_len, flags, dc8051_err_flags,
7722                 ARRAY_SIZE(dc8051_err_flags));
7723 }
7724
7725 static char *dc8051_info_err_string(char *buf, int buf_len, u64 flags)
7726 {
7727         return flag_string(buf, buf_len, flags, dc8051_info_err_flags,
7728                 ARRAY_SIZE(dc8051_info_err_flags));
7729 }
7730
7731 static char *dc8051_info_host_msg_string(char *buf, int buf_len, u64 flags)
7732 {
7733         return flag_string(buf, buf_len, flags, dc8051_info_host_msg_flags,
7734                 ARRAY_SIZE(dc8051_info_host_msg_flags));
7735 }
7736
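/*
 * Handle a DC8051 error interrupt.
 *
 * For flags set by the 8051 firmware, read DC_DC8051_DBG_ERR_INFO_SET_BY_8051
 * and split it into error and host message fields, queuing the appropriate
 * link work (SMA message, link up, verify cap, link downgrade, link down).
 * A lost 8051 heartbeat disables further heartbeat interrupts.  Any
 * remaining bits are only reported.
 */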
7737 static void handle_8051_interrupt(struct hfi1_devdata *dd, u32 unused, u64 reg)
7738 {
7739         struct hfi1_pportdata *ppd = dd->pport;
7740         u64 info, err, host_msg;
7741         int queue_link_down = 0;
7742         char buf[96];
7743
7744         /* look at the flags */
7745         if (reg & DC_DC8051_ERR_FLG_SET_BY_8051_SMASK) {
7746                 /* 8051 information set by firmware */
7747                 /* read DC8051_DBG_ERR_INFO_SET_BY_8051 for details */
7748                 info = read_csr(dd, DC_DC8051_DBG_ERR_INFO_SET_BY_8051);
7749                 err = (info >> DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_SHIFT)
7750                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_ERROR_MASK;
7751                 host_msg = (info >>
7752                         DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_SHIFT)
7753                         & DC_DC8051_DBG_ERR_INFO_SET_BY_8051_HOST_MSG_MASK;
7754
7755                 /*
7756                  * Handle error flags.
7757                  */
7758                 if (err & FAILED_LNI) {
7759                         /*
7760                          * LNI error indications are cleared by the 8051
7761                          * only when starting polling.  Only pay attention
7762                          * to them when in the states that occur during
7763                          * LNI.
7764                          */
7765                         if (ppd->host_link_state
7766                             & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
7767                                 queue_link_down = 1;
7768                                 dd_dev_info(dd, "Link error: %s\n",
7769                                             dc8051_info_err_string(buf,
7770                                                                    sizeof(buf),
7771                                                                    err &
7772                                                                    FAILED_LNI));
7773                         }
7774                         err &= ~(u64)FAILED_LNI;
7775                 }
7776                 /* unknown frames can happen during LNI, just count */
7777                 if (err & UNKNOWN_FRAME) {
7778                         ppd->unknown_frame_count++;
7779                         err &= ~(u64)UNKNOWN_FRAME;
7780                 }
7781                 if (err) {
7782                         /* report remaining errors, but do not do anything */
7783                         dd_dev_err(dd, "8051 info error: %s\n",
7784                                    dc8051_info_err_string(buf, sizeof(buf),
7785                                                           err));
7786                 }
7787
7788                 /*
7789                  * Handle host message flags.
7790                  */
7791                 if (host_msg & HOST_REQ_DONE) {
7792                         /*
7793                          * Presently, the driver does a busy wait for
7794                          * host requests to complete.  This is only an
7795                          * informational message.
7796                          * NOTE: The 8051 clears the host message
7797                          * information *on the next 8051 command*.
7798                          * Therefore, when linkup is achieved,
7799                          * this flag will still be set.
7800                          */
7801                         host_msg &= ~(u64)HOST_REQ_DONE;
7802                 }
7803                 if (host_msg & BC_SMA_MSG) {
7804                         queue_work(ppd->link_wq, &ppd->sma_message_work);
7805                         host_msg &= ~(u64)BC_SMA_MSG;
7806                 }
7807                 if (host_msg & LINKUP_ACHIEVED) {
7808                         dd_dev_info(dd, "8051: Link up\n");
7809                         queue_work(ppd->link_wq, &ppd->link_up_work);
7810                         host_msg &= ~(u64)LINKUP_ACHIEVED;
7811                 }
7812                 if (host_msg & EXT_DEVICE_CFG_REQ) {
7813                         handle_8051_request(ppd);
7814                         host_msg &= ~(u64)EXT_DEVICE_CFG_REQ;
7815                 }
7816                 if (host_msg & VERIFY_CAP_FRAME) {
7817                         queue_work(ppd->link_wq, &ppd->link_vc_work);
7818                         host_msg &= ~(u64)VERIFY_CAP_FRAME;
7819                 }
7820                 if (host_msg & LINK_GOING_DOWN) {
7821                         const char *extra = "";
7822                         /* no downgrade action needed if going down */
7823                         if (host_msg & LINK_WIDTH_DOWNGRADED) {
7824                                 host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7825                                 extra = " (ignoring downgrade)";
7826                         }
7827                         dd_dev_info(dd, "8051: Link down%s\n", extra);
7828                         queue_link_down = 1;
7829                         host_msg &= ~(u64)LINK_GOING_DOWN;
7830                 }
7831                 if (host_msg & LINK_WIDTH_DOWNGRADED) {
7832                         queue_work(ppd->link_wq, &ppd->link_downgrade_work);
7833                         host_msg &= ~(u64)LINK_WIDTH_DOWNGRADED;
7834                 }
7835                 if (host_msg) {
7836                         /* report remaining messages, but do not do anything */
7837                         dd_dev_info(dd, "8051 info host message: %s\n",
7838                                     dc8051_info_host_msg_string(buf,
7839                                                                 sizeof(buf),
7840                                                                 host_msg));
7841                 }
7842
7843                 reg &= ~DC_DC8051_ERR_FLG_SET_BY_8051_SMASK;
7844         }
7845         if (reg & DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK) {
7846                 /*
7847                  * Lost the 8051 heartbeat.  If this happens, we
7848                  * receive constant interrupts about it.  Disable
7849                  * the interrupt after the first.
7850                  */
7851                 dd_dev_err(dd, "Lost 8051 heartbeat\n");
7852                 write_csr(dd, DC_DC8051_ERR_EN,
7853                           read_csr(dd, DC_DC8051_ERR_EN) &
7854                           ~DC_DC8051_ERR_EN_LOST_8051_HEART_BEAT_SMASK);
7855
7856                 reg &= ~DC_DC8051_ERR_FLG_LOST_8051_HEART_BEAT_SMASK;
7857         }
7858         if (reg) {
7859                 /* report the error, but do not do anything */
7860                 dd_dev_err(dd, "8051 error: %s\n",
7861                            dc8051_err_string(buf, sizeof(buf), reg));
7862         }
7863
7864         if (queue_link_down) {
7865                 /*
7866                  * If the link is already going down or disabled, do not
7867                  * queue a link down request. If one is already queued,
7868                  * don't queue another one.
7869                  */
7870                 if ((ppd->host_link_state &
7871                     (HLS_GOING_OFFLINE | HLS_LINK_COOLDOWN)) ||
7872                     ppd->link_enabled == 0) {
7873                         dd_dev_info(dd, "%s: not queuing link down. host_link_state %x, link_enabled %x\n",
7874                                     __func__, ppd->host_link_state,
7875                                     ppd->link_enabled);
7876                 } else {
7877                         if (xchg(&ppd->is_link_down_queued, 1) == 1)
7878                                 dd_dev_info(dd,
7879                                             "%s: link down request already queued\n",
7880                                             __func__);
7881                         else
7882                                 queue_work(ppd->link_wq, &ppd->link_down_work);
7883                 }
7884         }
7885 }
7886
7887 static const char * const fm_config_txt[] = {
7888 [0] =
7889         "BadHeadDist: Distance violation between two head flits",
7890 [1] =
7891         "BadTailDist: Distance violation between two tail flits",
7892 [2] =
7893         "BadCtrlDist: Distance violation between two credit control flits",
7894 [3] =
7895         "BadCrdAck: Credits return for unsupported VL",
7896 [4] =
7897         "UnsupportedVLMarker: Received VL Marker",
7898 [5] =
7899         "BadPreempt: Exceeded the preemption nesting level",
7900 [6] =
7901         "BadControlFlit: Received unsupported control flit",
7902 /* no 7 */
7903 [8] =
7904         "UnsupportedVLMarker: Received VL Marker for unconfigured or disabled VL",
7905 };
7906
7907 static const char * const port_rcv_txt[] = {
7908 [1] =
7909         "BadPktLen: Illegal PktLen",
7910 [2] =
7911         "PktLenTooLong: Packet longer than PktLen",
7912 [3] =
7913         "PktLenTooShort: Packet shorter than PktLen",
7914 [4] =
7915         "BadSLID: Illegal SLID (0, using multicast as SLID, does not include security validation of SLID)",
7916 [5] =
7917         "BadDLID: Illegal DLID (0, doesn't match HFI)",
7918 [6] =
7919         "BadL2: Illegal L2 opcode",
7920 [7] =
7921         "BadSC: Unsupported SC",
7922 [9] =
7923         "BadRC: Illegal RC",
7924 [11] =
7925         "PreemptError: Preempting with same VL",
7926 [12] =
7927         "PreemptVL15: Preempting a VL15 packet",
7928 };
7929
7930 #define OPA_LDR_FMCONFIG_OFFSET 16
7931 #define OPA_LDR_PORTRCV_OFFSET 0
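/*
 * Handle a DCC error interrupt.
 *
 * Latch the first uncorrectable, fmconfig, and port receive error info for
 * later error-info queries, count link-down events, and report decoded
 * errors (rate limited).  If the port error action policy asks for it, set
 * a link down reason and queue a link bounce.
 */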
7932 static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
7933 {
7934         u64 info, hdr0, hdr1;
7935         const char *extra;
7936         char buf[96];
7937         struct hfi1_pportdata *ppd = dd->pport;
7938         u8 lcl_reason = 0;
7939         int do_bounce = 0;
7940
7941         if (reg & DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK) {
7942                 if (!(dd->err_info_uncorrectable & OPA_EI_STATUS_SMASK)) {
7943                         info = read_csr(dd, DCC_ERR_INFO_UNCORRECTABLE);
7944                         dd->err_info_uncorrectable = info & OPA_EI_CODE_SMASK;
7945                         /* set status bit */
7946                         dd->err_info_uncorrectable |= OPA_EI_STATUS_SMASK;
7947                 }
7948                 reg &= ~DCC_ERR_FLG_UNCORRECTABLE_ERR_SMASK;
7949         }
7950
7951         if (reg & DCC_ERR_FLG_LINK_ERR_SMASK) {
7952                 struct hfi1_pportdata *ppd = dd->pport;
7953                 /* this counter saturates at (2^32) - 1 */
7954                 if (ppd->link_downed < (u32)UINT_MAX)
7955                         ppd->link_downed++;
7956                 reg &= ~DCC_ERR_FLG_LINK_ERR_SMASK;
7957         }
7958
7959         if (reg & DCC_ERR_FLG_FMCONFIG_ERR_SMASK) {
7960                 u8 reason_valid = 1;
7961
7962                 info = read_csr(dd, DCC_ERR_INFO_FMCONFIG);
7963                 if (!(dd->err_info_fmconfig & OPA_EI_STATUS_SMASK)) {
7964                         dd->err_info_fmconfig = info & OPA_EI_CODE_SMASK;
7965                         /* set status bit */
7966                         dd->err_info_fmconfig |= OPA_EI_STATUS_SMASK;
7967                 }
7968                 switch (info) {
7969                 case 0:
7970                 case 1:
7971                 case 2:
7972                 case 3:
7973                 case 4:
7974                 case 5:
7975                 case 6:
7976                         extra = fm_config_txt[info];
7977                         break;
7978                 case 8:
7979                         extra = fm_config_txt[info];
7980                         if (ppd->port_error_action &
7981                             OPA_PI_MASK_FM_CFG_UNSUPPORTED_VL_MARKER) {
7982                                 do_bounce = 1;
7983                                 /*
7984                                  * lcl_reason cannot be derived from info
7985                                  * for this error
7986                                  */
7987                                 lcl_reason =
7988                                   OPA_LINKDOWN_REASON_UNSUPPORTED_VL_MARKER;
7989                         }
7990                         break;
7991                 default:
7992                         reason_valid = 0;
7993                         snprintf(buf, sizeof(buf), "reserved%lld", info);
7994                         extra = buf;
7995                         break;
7996                 }
7997
7998                 if (reason_valid && !do_bounce) {
7999                         do_bounce = ppd->port_error_action &
8000                                         (1 << (OPA_LDR_FMCONFIG_OFFSET + info));
8001                         lcl_reason = info + OPA_LINKDOWN_REASON_BAD_HEAD_DIST;
8002                 }
8003
8004                 /* just report this */
8005                 dd_dev_info_ratelimited(dd, "DCC Error: fmconfig error: %s\n",
8006                                         extra);
8007                 reg &= ~DCC_ERR_FLG_FMCONFIG_ERR_SMASK;
8008         }
8009
8010         if (reg & DCC_ERR_FLG_RCVPORT_ERR_SMASK) {
8011                 u8 reason_valid = 1;
8012
8013                 info = read_csr(dd, DCC_ERR_INFO_PORTRCV);
8014                 hdr0 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR0);
8015                 hdr1 = read_csr(dd, DCC_ERR_INFO_PORTRCV_HDR1);
8016                 if (!(dd->err_info_rcvport.status_and_code &
8017                       OPA_EI_STATUS_SMASK)) {
8018                         dd->err_info_rcvport.status_and_code =
8019                                 info & OPA_EI_CODE_SMASK;
8020                         /* set status bit */
8021                         dd->err_info_rcvport.status_and_code |=
8022                                 OPA_EI_STATUS_SMASK;
8023                         /*
8024                          * save first 2 flits in the packet that caused
8025                          * the error
8026                          */
8027                         dd->err_info_rcvport.packet_flit1 = hdr0;
8028                         dd->err_info_rcvport.packet_flit2 = hdr1;
8029                 }
8030                 switch (info) {
8031                 case 1:
8032                 case 2:
8033                 case 3:
8034                 case 4:
8035                 case 5:
8036                 case 6:
8037                 case 7:
8038                 case 9:
8039                 case 11:
8040                 case 12:
8041                         extra = port_rcv_txt[info];
8042                         break;
8043                 default:
8044                         reason_valid = 0;
8045                         snprintf(buf, sizeof(buf), "reserved%lld", info);
8046                         extra = buf;
8047                         break;
8048                 }
8049
8050                 if (reason_valid && !do_bounce) {
8051                         do_bounce = ppd->port_error_action &
8052                                         (1 << (OPA_LDR_PORTRCV_OFFSET + info));
8053                         lcl_reason = info + OPA_LINKDOWN_REASON_RCV_ERROR_0;
8054                 }
8055
8056                 /* just report this */
8057                 dd_dev_info_ratelimited(dd, "DCC Error: PortRcv error: %s\n"
8058                                         "               hdr0 0x%llx, hdr1 0x%llx\n",
8059                                         extra, hdr0, hdr1);
8060
8061                 reg &= ~DCC_ERR_FLG_RCVPORT_ERR_SMASK;
8062         }
8063
8064         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK) {
8065                 /* informative only */
8066                 dd_dev_info_ratelimited(dd, "8051 access to LCB blocked\n");
8067                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_UC_SMASK;
8068         }
8069         if (reg & DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK) {
8070                 /* informative only */
8071                 dd_dev_info_ratelimited(dd, "host access to LCB blocked\n");
8072                 reg &= ~DCC_ERR_FLG_EN_CSR_ACCESS_BLOCKED_HOST_SMASK;
8073         }
8074
8075         if (unlikely(hfi1_dbg_fault_suppress_err(&dd->verbs_dev)))
8076                 reg &= ~DCC_ERR_FLG_LATE_EBP_ERR_SMASK;
8077
8078         /* report any remaining errors */
8079         if (reg)
8080                 dd_dev_info_ratelimited(dd, "DCC Error: %s\n",
8081                                         dcc_err_string(buf, sizeof(buf), reg));
8082
8083         if (lcl_reason == 0)
8084                 lcl_reason = OPA_LINKDOWN_REASON_UNKNOWN;
8085
8086         if (do_bounce) {
8087                 dd_dev_info_ratelimited(dd, "%s: PortErrorAction bounce\n",
8088                                         __func__);
8089                 set_link_down_reason(ppd, lcl_reason, 0, lcl_reason);
8090                 queue_work(ppd->link_wq, &ppd->link_bounce_work);
8091         }
8092 }
8093
8094 static void handle_lcb_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
8095 {
8096         char buf[96];
8097
8098         dd_dev_info(dd, "LCB Error: %s\n",
8099                     lcb_err_string(buf, sizeof(buf), reg));
8100 }
8101
8102 /*
8103  * CCE block DC interrupt.  Source is < 8.
8104  */
8105 static void is_dc_int(struct hfi1_devdata *dd, unsigned int source)
8106 {
8107         const struct err_reg_info *eri = &dc_errs[source];
8108
8109         if (eri->handler) {
8110                 interrupt_clear_down(dd, 0, eri);
8111         } else if (source == 3 /* dc_lbm_int */) {
8112                 /*
8113                  * This indicates that a parity error has occurred on the
8114                  * is a single pulse; there is no associated error flag,
8115                  * and it is non-maskable.  This is because if a parity
8116                  * error occurs on the request, the request is dropped.
8117                  * error occurs on the request the request is dropped.
8118                  * This should never occur, but it is nice to know if it
8119                  * ever does.
8120                  */
8121                 dd_dev_err(dd, "Parity error in DC LBM block\n");
8122         } else {
8123                 dd_dev_err(dd, "Invalid DC interrupt %u\n", source);
8124         }
8125 }
8126
8127 /*
8128  * TX block send credit interrupt.  Source is < 160.
8129  */
8130 static void is_send_credit_int(struct hfi1_devdata *dd, unsigned int source)
8131 {
8132         sc_group_release_update(dd, source);
8133 }
8134
8135 /*
8136  * TX block SDMA interrupt.  Source is < 48.
8137  *
8138  * SDMA interrupts are grouped by type:
8139  *
8140  *       0 -  N-1 = SDma
8141  *       N - 2N-1 = SDmaProgress
8142  *      2N - 3N-1 = SDmaIdle
8143  */
8144 static void is_sdma_eng_int(struct hfi1_devdata *dd, unsigned int source)
8145 {
8146         /* what interrupt */
8147         unsigned int what  = source / TXE_NUM_SDMA_ENGINES;
8148         /* which engine */
8149         unsigned int which = source % TXE_NUM_SDMA_ENGINES;
8150
8151 #ifdef CONFIG_SDMA_VERBOSITY
8152         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", which,
8153                    slashstrip(__FILE__), __LINE__, __func__);
8154         sdma_dumpstate(&dd->per_sdma[which]);
8155 #endif
8156
8157         if (likely(what < 3 && which < dd->num_sdma)) {
8158                 sdma_engine_interrupt(&dd->per_sdma[which], 1ull << source);
8159         } else {
8160                 /* should not happen */
8161                 dd_dev_err(dd, "Invalid SDMA interrupt 0x%x\n", source);
8162         }
8163 }
8164
8165 /**
8166  * is_rcv_avail_int() - User receive context available IRQ handler
8167  * @dd: valid dd
8168  * @source: logical IRQ source (offset from IS_RCVAVAIL_START)
8169  *
8170  * RX block receive available interrupt.  Source is < 160.
8171  *
8172  * This is the general interrupt handler for user (PSM) receive contexts,
8173  * and can only be used for non-threaded IRQs.
8174  */
8175 static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
8176 {
8177         struct hfi1_ctxtdata *rcd;
8178         char *err_detail;
8179
8180         if (likely(source < dd->num_rcv_contexts)) {
8181                 rcd = hfi1_rcd_get_by_index(dd, source);
8182                 if (rcd) {
8183                         handle_user_interrupt(rcd);
8184                         hfi1_rcd_put(rcd);
8185                         return; /* OK */
8186                 }
8187                 /* received an interrupt, but no rcd */
8188                 err_detail = "dataless";
8189         } else {
8190                 /* received an interrupt, but are not using that context */
8191                 err_detail = "out of range";
8192         }
8193         dd_dev_err(dd, "unexpected %s receive available context interrupt %u\n",
8194                    err_detail, source);
8195 }
8196
8197 /**
8198  * is_rcv_urgent_int() - User receive context urgent IRQ handler
8199  * @dd: valid dd
8200  * @source: logical IRQ source (offset from IS_RCVURGENT_START)
8201  *
8202  * RX block receive urgent interrupt.  Source is < 160.
8203  *
8204  * NOTE: kernel receive contexts specifically do NOT enable this IRQ.
8205  */
8206 static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
8207 {
8208         struct hfi1_ctxtdata *rcd;
8209         char *err_detail;
8210
8211         if (likely(source < dd->num_rcv_contexts)) {
8212                 rcd = hfi1_rcd_get_by_index(dd, source);
8213                 if (rcd) {
8214                         handle_user_interrupt(rcd);
8215                         hfi1_rcd_put(rcd);
8216                         return; /* OK */
8217                 }
8218                 /* received an interrupt, but no rcd */
8219                 err_detail = "dataless";
8220         } else {
8221                 /* received an interrupt, but are not using that context */
8222                 err_detail = "out of range";
8223         }
8224         dd_dev_err(dd, "unexpected %s receive urgent context interrupt %u\n",
8225                    err_detail, source);
8226 }
8227
8228 /*
8229  * Reserved range interrupt.  Should not be called in normal operation.
8230  */
8231 static void is_reserved_int(struct hfi1_devdata *dd, unsigned int source)
8232 {
8233         char name[64];
8234
8235         dd_dev_err(dd, "unexpected %s interrupt\n",
8236                    is_reserved_name(name, sizeof(name), source));
8237 }
8238
8239 static const struct is_table is_table[] = {
8240 /*
8241  * start                 end
8242  *                              name func               interrupt func
8243  */
8244 { IS_GENERAL_ERR_START,  IS_GENERAL_ERR_END,
8245                                 is_misc_err_name,       is_misc_err_int },
8246 { IS_SDMAENG_ERR_START,  IS_SDMAENG_ERR_END,
8247                                 is_sdma_eng_err_name,   is_sdma_eng_err_int },
8248 { IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
8249                                 is_sendctxt_err_name,   is_sendctxt_err_int },
8250 { IS_SDMA_START,             IS_SDMA_IDLE_END,
8251                                 is_sdma_eng_name,       is_sdma_eng_int },
8252 { IS_VARIOUS_START,          IS_VARIOUS_END,
8253                                 is_various_name,        is_various_int },
8254 { IS_DC_START,       IS_DC_END,
8255                                 is_dc_name,             is_dc_int },
8256 { IS_RCVAVAIL_START,     IS_RCVAVAIL_END,
8257                                 is_rcv_avail_name,      is_rcv_avail_int },
8258 { IS_RCVURGENT_START,    IS_RCVURGENT_END,
8259                                 is_rcv_urgent_name,     is_rcv_urgent_int },
8260 { IS_SENDCREDIT_START,   IS_SENDCREDIT_END,
8261                                 is_send_credit_name,    is_send_credit_int},
8262 { IS_RESERVED_START,     IS_RESERVED_END,
8263                                 is_reserved_name,       is_reserved_int},
8264 };
8265
8266 /*
8267  * Interrupt source dispatcher - called when the given source has an interrupt.
8268  * Source is a bit index into an array of 64-bit integers.
8269  */
8270 static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
8271 {
8272         const struct is_table *entry;
8273
8274         /* avoids a double compare by walking the table in-order */
8275         for (entry = &is_table[0]; entry->is_name; entry++) {
8276                 if (source <= entry->end) {
8277                         trace_hfi1_interrupt(dd, entry, source);
8278                         entry->is_int(dd, source - entry->start);
8279                         return;
8280                 }
8281         }
8282         /* fell off the end */
8283         dd_dev_err(dd, "invalid interrupt source %u\n", source);
8284 }
8285
8286 /**
8287  * general_interrupt() - General interrupt handler
8288  * @irq: MSIx IRQ vector
8289  * @data: hfi1 devdata
8290  *
8291  * This is able to correctly handle all non-threaded interrupts.  Receive
8292  * context DATA IRQs are threaded and are not supported by this handler.
8293  *
8294  */
8295 irqreturn_t general_interrupt(int irq, void *data)
8296 {
8297         struct hfi1_devdata *dd = data;
8298         u64 regs[CCE_NUM_INT_CSRS];
8299         u32 bit;
8300         int i;
8301         irqreturn_t handled = IRQ_NONE;
8302
8303         this_cpu_inc(*dd->int_counter);
8304
8305         /* phase 1: scan and clear all handled interrupts */
8306         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
8307                 if (dd->gi_mask[i] == 0) {
8308                         regs[i] = 0;    /* used later */
8309                         continue;
8310                 }
8311                 regs[i] = read_csr(dd, CCE_INT_STATUS + (8 * i)) &
8312                                 dd->gi_mask[i];
8313                 /* only clear if anything is set */
8314                 if (regs[i])
8315                         write_csr(dd, CCE_INT_CLEAR + (8 * i), regs[i]);
8316         }
8317
8318         /* phase 2: call the appropriate handler */
8319         for_each_set_bit(bit, (unsigned long *)&regs[0],
8320                          CCE_NUM_INT_CSRS * 64) {
8321                 is_interrupt(dd, bit);
8322                 handled = IRQ_HANDLED;
8323         }
8324
8325         return handled;
8326 }
8327
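/*
 * Per-engine SDMA IRQ handler.  Read and clear this engine's pending status
 * bits, then hand them to sdma_engine_interrupt() for servicing.
 */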
8328 irqreturn_t sdma_interrupt(int irq, void *data)
8329 {
8330         struct sdma_engine *sde = data;
8331         struct hfi1_devdata *dd = sde->dd;
8332         u64 status;
8333
8334 #ifdef CONFIG_SDMA_VERBOSITY
8335         dd_dev_err(dd, "CONFIG SDMA(%u) %s:%d %s()\n", sde->this_idx,
8336                    slashstrip(__FILE__), __LINE__, __func__);
8337         sdma_dumpstate(sde);
8338 #endif
8339
8340         this_cpu_inc(*dd->int_counter);
8341
8342         /* This read_csr is really bad in the hot path */
8343         status = read_csr(dd,
8344                           CCE_INT_STATUS + (8 * (IS_SDMA_START / 64)))
8345                           & sde->imask;
8346         if (likely(status)) {
8347                 /* clear the interrupt(s) */
8348                 write_csr(dd,
8349                           CCE_INT_CLEAR + (8 * (IS_SDMA_START / 64)),
8350                           status);
8351
8352                 /* handle the interrupt(s) */
8353                 sdma_engine_interrupt(sde, status);
8354         } else {
8355                 dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
8356                                         sde->this_idx);
8357         }
8358         return IRQ_HANDLED;
8359 }
8360
8361 /*
8362  * Clear the receive interrupt.  Use a read of the interrupt clear CSR
8363  * to ensure that the write completed.  This does NOT guarantee that
8364  * queued DMA writes to memory from the chip are pushed.
8365  */
8366 static inline void clear_recv_intr(struct hfi1_ctxtdata *rcd)
8367 {
8368         struct hfi1_devdata *dd = rcd->dd;
8369         u32 addr = CCE_INT_CLEAR + (8 * rcd->ireg);
8370
8371         write_csr(dd, addr, rcd->imask);
8372         /* force the above write on the chip and get a value back */
8373         (void)read_csr(dd, addr);
8374 }
8375
8376 /* force the receive interrupt */
8377 void force_recv_intr(struct hfi1_ctxtdata *rcd)
8378 {
8379         write_csr(rcd->dd, CCE_INT_FORCE + (8 * rcd->ireg), rcd->imask);
8380 }
8381
8382 /*
8383  * Return non-zero if a packet is present.
8384  *
8385  * This routine is called when rechecking for packets after the RcvAvail
8386  * interrupt has been cleared down.  First, do a quick check of memory for
8387  * a packet present.  If not found, use an expensive CSR read of the context
8388  * tail to determine the actual tail.  The CSR read is necessary because there
8389  * is no method to push pending DMAs to memory other than an interrupt and we
8390  * are trying to determine if we need to force an interrupt.
8391  */
8392 static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
8393 {
8394         u32 tail;
8395         int present;
8396
8397         if (!rcd->rcvhdrtail_kvaddr)
8398                 present = (rcd->seq_cnt ==
8399                                 rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
8400         else /* is RDMA rtail */
8401                 present = (rcd->head != get_rcvhdrtail(rcd));
8402
8403         if (present)
8404                 return 1;
8405
8406         /* fall back to a CSR read, correct independent of DMA_RTAIL */
8407         tail = (u32)read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
8408         return rcd->head != tail;
8409 }
8410
8411 /*
8412  * Receive packet IRQ handler.  This routine expects to be on its own IRQ.
8413  * This routine will try to handle packets immediately (latency), but if
8414  * it finds too many, it will invoke the thread handler (bandwidth).  The
8415  * chip receive interrupt is *not* cleared down until this or the thread (if
8416  * invoked) is finished.  The intent is to avoid extra interrupts while we
8417  * are processing packets anyway.
8418  */
8419 irqreturn_t receive_context_interrupt(int irq, void *data)
8420 {
8421         struct hfi1_ctxtdata *rcd = data;
8422         struct hfi1_devdata *dd = rcd->dd;
8423         int disposition;
8424         int present;
8425
8426         trace_hfi1_receive_interrupt(dd, rcd);
8427         this_cpu_inc(*dd->int_counter);
8428         aspm_ctx_disable(rcd);
8429
8430         /* receive interrupt remains blocked while processing packets */
8431         disposition = rcd->do_interrupt(rcd, 0);
8432
8433         /*
8434          * Too many packets were seen while processing packets in this
8435          * IRQ handler.  Invoke the handler thread.  The receive interrupt
8436          * remains blocked.
8437          */
8438         if (disposition == RCV_PKT_LIMIT)
8439                 return IRQ_WAKE_THREAD;
8440
8441         /*
8442          * The packet processor detected no more packets.  Clear the receive
8443          * interrupt and recheck for a packet that may have arrived
8444          * after the previous check and interrupt clear.  If a packet arrived,
8445          * force another interrupt.
8446          */
8447         clear_recv_intr(rcd);
8448         present = check_packet_present(rcd);
8449         if (present)
8450                 force_recv_intr(rcd);
8451
8452         return IRQ_HANDLED;
8453 }
8454
8455 /*
8456  * Receive packet thread handler.  This expects to be invoked with the
8457  * receive interrupt still blocked.
8458  */
8459 irqreturn_t receive_context_thread(int irq, void *data)
8460 {
8461         struct hfi1_ctxtdata *rcd = data;
8462         int present;
8463
8464         /* receive interrupt is still blocked from the IRQ handler */
8465         (void)rcd->do_interrupt(rcd, 1);
8466
8467         /*
8468          * The packet processor will only return if it detected no more
8469          * packets.  Hold IRQs here so we can safely clear the interrupt and
8470          * recheck for a packet that may have arrived after the previous
8471          * check and the interrupt clear.  If a packet arrived, force another
8472          * interrupt.
8473          */
8474         local_irq_disable();
8475         clear_recv_intr(rcd);
8476         present = check_packet_present(rcd);
8477         if (present)
8478                 force_recv_intr(rcd);
8479         local_irq_enable();
8480
8481         return IRQ_HANDLED;
8482 }
8483
8484 /* ========================================================================= */
8485
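/* Return the current physical link state as reported by the 8051. */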
8486 u32 read_physical_state(struct hfi1_devdata *dd)
8487 {
8488         u64 reg;
8489
8490         reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);
8491         return (reg >> DC_DC8051_STS_CUR_STATE_PORT_SHIFT)
8492                                 & DC_DC8051_STS_CUR_STATE_PORT_MASK;
8493 }
8494
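/* Return the current logical link state from the DCC port configuration. */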
8495 u32 read_logical_state(struct hfi1_devdata *dd)
8496 {
8497         u64 reg;
8498
8499         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8500         return (reg >> DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT)
8501                                 & DCC_CFG_PORT_CONFIG_LINK_STATE_MASK;
8502 }
8503
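/* Set the logical link state field in the DCC port configuration CSR. */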
8504 static void set_logical_state(struct hfi1_devdata *dd, u32 chip_lstate)
8505 {
8506         u64 reg;
8507
8508         reg = read_csr(dd, DCC_CFG_PORT_CONFIG);
8509         /* clear current state, set new state */
8510         reg &= ~DCC_CFG_PORT_CONFIG_LINK_STATE_SMASK;
8511         reg |= (u64)chip_lstate << DCC_CFG_PORT_CONFIG_LINK_STATE_SHIFT;
8512         write_csr(dd, DCC_CFG_PORT_CONFIG, reg);
8513 }
8514
8515 /*
8516  * Use the 8051 to read an LCB CSR.
8517  */
8518 static int read_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 *data)
8519 {
8520         u32 regno;
8521         int ret;
8522
8523         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
8524                 if (acquire_lcb_access(dd, 0) == 0) {
8525                         *data = read_csr(dd, addr);
8526                         release_lcb_access(dd, 0);
8527                         return 0;
8528                 }
8529                 return -EBUSY;
8530         }
8531
8532         /* register is an index of LCB registers: (offset - base) / 8 */
8533         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8534         ret = do_8051_command(dd, HCMD_READ_LCB_CSR, regno, data);
8535         if (ret != HCMD_SUCCESS)
8536                 return -EBUSY;
8537         return 0;
8538 }
8539
8540 /*
8541  * Provide a cache for some of the LCB registers in case the LCB is
8542  * unavailable.
8543  * (The LCB is unavailable in certain link states, for example.)
8544  */
8545 struct lcb_datum {
8546         u32 off;
8547         u64 val;
8548 };
8549
8550 static struct lcb_datum lcb_cache[] = {
8551         { DC_LCB_ERR_INFO_RX_REPLAY_CNT, 0},
8552         { DC_LCB_ERR_INFO_SEQ_CRC_CNT, 0 },
8553         { DC_LCB_ERR_INFO_REINIT_FROM_PEER_CNT, 0 },
8554 };
8555
8556 static void update_lcb_cache(struct hfi1_devdata *dd)
8557 {
8558         int i;
8559         int ret;
8560         u64 val;
8561
8562         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8563                 ret = read_lcb_csr(dd, lcb_cache[i].off, &val);
8564
8565                 /* Update if we get good data */
8566                 if (likely(ret != -EBUSY))
8567                         lcb_cache[i].val = val;
8568         }
8569 }
8570
8571 static int read_lcb_cache(u32 off, u64 *val)
8572 {
8573         int i;
8574
8575         for (i = 0; i < ARRAY_SIZE(lcb_cache); i++) {
8576                 if (lcb_cache[i].off == off) {
8577                         *val = lcb_cache[i].val;
8578                         return 0;
8579                 }
8580         }
8581
8582         pr_warn("%s bad offset 0x%x\n", __func__, off);
8583         return -1;
8584 }
8585
8586 /*
8587  * Read an LCB CSR.  Access may not be in host control, so check.
8588  * Return 0 on success, -EBUSY on failure.
8589  */
8590 int read_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 *data)
8591 {
8592         struct hfi1_pportdata *ppd = dd->pport;
8593
8594         /* if up, go through the 8051 for the value */
8595         if (ppd->host_link_state & HLS_UP)
8596                 return read_lcb_via_8051(dd, addr, data);
8597         /* if going up or down, check the cache, otherwise, no access */
8598         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE)) {
8599                 if (read_lcb_cache(addr, data))
8600                         return -EBUSY;
8601                 return 0;
8602         }
8603
8604         /* otherwise, host has access */
8605         *data = read_csr(dd, addr);
8606         return 0;
8607 }
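/*
 * Minimal usage sketch (illustrative only): callers treat a -EBUSY return
 * as "value not currently available", e.g.:
 *
 *	u64 crc_cnt;
 *
 *	if (!read_lcb_csr(dd, DC_LCB_ERR_INFO_SEQ_CRC_CNT, &crc_cnt))
 *		hfi1_cdbg(DC8051, "seq crc cnt %llu", crc_cnt);
 */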
8608
8609 /*
8610  * Use the 8051 to write an LCB CSR.
8611  */
8612 static int write_lcb_via_8051(struct hfi1_devdata *dd, u32 addr, u64 data)
8613 {
8614         u32 regno;
8615         int ret;
8616
8617         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR ||
8618             (dd->dc8051_ver < dc8051_ver(0, 20, 0))) {
8619                 if (acquire_lcb_access(dd, 0) == 0) {
8620                         write_csr(dd, addr, data);
8621                         release_lcb_access(dd, 0);
8622                         return 0;
8623                 }
8624                 return -EBUSY;
8625         }
8626
8627         /* register is an index of LCB registers: (offset - base) / 8 */
8628         regno = (addr - DC_LCB_CFG_RUN) >> 3;
8629         ret = do_8051_command(dd, HCMD_WRITE_LCB_CSR, regno, &data);
8630         if (ret != HCMD_SUCCESS)
8631                 return -EBUSY;
8632         return 0;
8633 }
8634
8635 /*
8636  * Write an LCB CSR.  Access may not be in host control, so check.
8637  * Return 0 on success, -EBUSY on failure.
8638  */
8639 int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
8640 {
8641         struct hfi1_pportdata *ppd = dd->pport;
8642
8643         /* if up, go through the 8051 for the value */
8644         if (ppd->host_link_state & HLS_UP)
8645                 return write_lcb_via_8051(dd, addr, data);
8646         /* if going up or down, no access */
8647         if (ppd->host_link_state & (HLS_GOING_UP | HLS_GOING_OFFLINE))
8648                 return -EBUSY;
8649         /* otherwise, host has access */
8650         write_csr(dd, addr, data);
8651         return 0;
8652 }
8653
8654 /*
8655  * Returns:
8656  *      < 0 = Linux error, not able to get access
8657  *      > 0 = 8051 command RETURN_CODE
8658  */
8659 static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
8660                            u64 *out_data)
8661 {
8662         u64 reg, completed;
8663         int return_code;
8664         unsigned long timeout;
8665
8666         hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
8667
8668         mutex_lock(&dd->dc8051_lock);
8669
8670         /* We can't send any commands to the 8051 if it's in reset */
8671         if (dd->dc_shutdown) {
8672                 return_code = -ENODEV;
8673                 goto fail;
8674         }
8675
8676         /*
8677          * If an 8051 host command timed out previously, then the 8051 is
8678          * stuck.
8679          *
8680          * On first timeout, attempt to reset and restart the entire DC
8681          * block (including 8051). (Is this too big of a hammer?)
8682          *
8683          * If the 8051 times out a second time, the reset did not bring it
8684          * back to healthy life. In that case, fail any subsequent commands.
8685          */
8686         if (dd->dc8051_timed_out) {
8687                 if (dd->dc8051_timed_out > 1) {
8688                         dd_dev_err(dd,
8689                                    "Previous 8051 host command timed out, skipping command %u\n",
8690                                    type);
8691                         return_code = -ENXIO;
8692                         goto fail;
8693                 }
8694                 _dc_shutdown(dd);
8695                 _dc_start(dd);
8696         }
8697
8698         /*
8699          * If there is no timeout, then the 8051 command interface is
8700          * waiting for a command.
8701          */
8702
8703         /*
8704          * When writing an LCB CSR, out_data contains the full value
8705          * to be written, while in_data contains the relative LCB
8706          * address in 7:0.  Do the work here, rather than in the caller,
8707          * of distributing the write data to where it needs to go:
8708          *
8709          * Write data
8710          *   39:00 -> in_data[47:8]
8711          *   47:40 -> DC8051_CFG_EXT_DEV_0.RETURN_CODE
8712          *   63:48 -> DC8051_CFG_EXT_DEV_0.RSP_DATA
8713          */
8714         if (type == HCMD_WRITE_LCB_CSR) {
8715                 in_data |= ((*out_data) & 0xffffffffffull) << 8;
8716                 /* must preserve COMPLETED - it is tied to hardware */
8717                 reg = read_csr(dd, DC_DC8051_CFG_EXT_DEV_0);
8718                 reg &= DC_DC8051_CFG_EXT_DEV_0_COMPLETED_SMASK;
8719                 reg |= ((((*out_data) >> 40) & 0xff) <<
8720                                 DC_DC8051_CFG_EXT_DEV_0_RETURN_CODE_SHIFT)
8721                       | ((((*out_data) >> 48) & 0xffff) <<
8722                                 DC_DC8051_CFG_EXT_DEV_0_RSP_DATA_SHIFT);
8723                 write_csr(dd, DC_DC8051_CFG_EXT_DEV_0, reg);
8724         }
8725
8726         /*
8727          * Do two writes: the first to stabilize the type and req_data, the
8728          * second to activate.
8729          */
8730         reg = ((u64)type & DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_MASK)
8731                         << DC_DC8051_CFG_HOST_CMD_0_REQ_TYPE_SHIFT
8732                 | (in_data & DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_MASK)
8733                         << DC_DC8051_CFG_HOST_CMD_0_REQ_DATA_SHIFT;
8734         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8735         reg |= DC_DC8051_CFG_HOST_CMD_0_REQ_NEW_SMASK;
8736         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, reg);
8737
8738         /* wait for completion, alternate: interrupt */
8739         timeout = jiffies + msecs_to_jiffies(DC8051_COMMAND_TIMEOUT);
8740         while (1) {
8741                 reg = read_csr(dd, DC_DC8051_CFG_HOST_CMD_1);
8742                 completed = reg & DC_DC8051_CFG_HOST_CMD_1_COMPLETED_SMASK;
8743                 if (completed)
8744                         break;
8745                 if (time_after(jiffies, timeout)) {
8746                         dd->dc8051_timed_out++;
8747                         dd_dev_err(dd, "8051 host command %u timeout\n", type);
8748                         if (out_data)
8749                                 *out_data = 0;
8750                         return_code = -ETIMEDOUT;
8751                         goto fail;
8752                 }
8753                 udelay(2);
8754         }
8755
8756         if (out_data) {
8757                 *out_data = (reg >> DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_SHIFT)
8758                                 & DC_DC8051_CFG_HOST_CMD_1_RSP_DATA_MASK;
8759                 if (type == HCMD_READ_LCB_CSR) {
8760                         /* top 16 bits are in a different register */
8761                         *out_data |= (read_csr(dd, DC_DC8051_CFG_EXT_DEV_1)
8762                                 & DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SMASK)
8763                                 << (48
8764                                     - DC_DC8051_CFG_EXT_DEV_1_REQ_DATA_SHIFT);
8765                 }
8766         }
8767         return_code = (reg >> DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_SHIFT)
8768                                 & DC_DC8051_CFG_HOST_CMD_1_RETURN_CODE_MASK;
8769         dd->dc8051_timed_out = 0;
8770         /*
8771          * Clear command for next user.
8772          */
8773         write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
8774
8775 fail:
8776         mutex_unlock(&dd->dc8051_lock);
8777         return return_code;
8778 }
8779
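/* Ask the 8051 to move the physical link to the given state. */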
8780 static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
8781 {
8782         return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
8783 }
8784
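/*
 * Write a 32-bit configuration value into an 8051 config field for the
 * given lane.  Returns the 8051 command result; failures are logged here.
 */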
8785 int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
8786                      u8 lane_id, u32 config_data)
8787 {
8788         u64 data;
8789         int ret;
8790
8791         data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
8792                 | (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
8793                 | (u64)config_data << LOAD_DATA_DATA_SHIFT;
8794         ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
8795         if (ret != HCMD_SUCCESS) {
8796                 dd_dev_err(dd,
8797                            "load 8051 config: field id %d, lane %d, err %d\n",
8798                            (int)field_id, (int)lane_id, ret);
8799         }
8800         return ret;
8801 }
8802
8803 /*
8804  * Read the 8051 firmware "registers".  Use the RAM directly.  Always
8805  * set the result, even on error.
8806  * Return 0 on success, -errno on failure
8807  */
8808 int read_8051_config(struct hfi1_devdata *dd, u8 field_id, u8 lane_id,
8809                      u32 *result)
8810 {
8811         u64 big_data;
8812         u32 addr;
8813         int ret;
8814
8815         /* address start depends on the lane_id */
8816         if (lane_id < 4)
8817                 addr = (4 * NUM_GENERAL_FIELDS)
8818                         + (lane_id * 4 * NUM_LANE_FIELDS);
8819         else
8820                 addr = 0;
8821         addr += field_id * 4;
8822
8823         /* read is in 8-byte chunks, hardware will truncate the address down */
8824         ret = read_8051_data(dd, addr, 8, &big_data);
8825
8826         if (ret == 0) {
8827                 /* extract the 4 bytes we want */
8828                 if (addr & 0x4)
8829                         *result = (u32)(big_data >> 32);
8830                 else
8831                         *result = (u32)big_data;
8832         } else {
8833                 *result = 0;
8834                 dd_dev_err(dd, "%s: direct read failed, lane %d, field %d!\n",
8835                            __func__, lane_id, field_id);
8836         }
8837
8838         return ret;
8839 }
8840
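/*
 * Write the local VerifyCap PHY frame: power management capability and
 * continuous remote update support.
 */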
8841 static int write_vc_local_phy(struct hfi1_devdata *dd, u8 power_management,
8842                               u8 continuous)
8843 {
8844         u32 frame;
8845
8846         frame = continuous << CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT
8847                 | power_management << POWER_MANAGEMENT_SHIFT;
8848         return load_8051_config(dd, VERIFY_CAP_LOCAL_PHY,
8849                                 GENERAL_CONFIG, frame);
8850 }
8851
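/*
 * Write the local VerifyCap fabric frame: vAU, z, vCU, vl15buf, and the
 * supported CRC sizes.
 */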
8852 static int write_vc_local_fabric(struct hfi1_devdata *dd, u8 vau, u8 z, u8 vcu,
8853                                  u16 vl15buf, u8 crc_sizes)
8854 {
8855         u32 frame;
8856
8857         frame = (u32)vau << VAU_SHIFT
8858                 | (u32)z << Z_SHIFT
8859                 | (u32)vcu << VCU_SHIFT
8860                 | (u32)vl15buf << VL15BUF_SHIFT
8861                 | (u32)crc_sizes << CRC_SIZES_SHIFT;
8862         return load_8051_config(dd, VERIFY_CAP_LOCAL_FABRIC,
8863                                 GENERAL_CONFIG, frame);
8864 }
8865
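/*
 * Read back the local VerifyCap link mode frame: misc config bits, local
 * flag bits, and the link widths.
 */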
8866 static void read_vc_local_link_mode(struct hfi1_devdata *dd, u8 *misc_bits,
8867                                     u8 *flag_bits, u16 *link_widths)
8868 {
8869         u32 frame;
8870
8871         read_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8872                          &frame);
8873         *misc_bits = (frame >> MISC_CONFIG_BITS_SHIFT) & MISC_CONFIG_BITS_MASK;
8874         *flag_bits = (frame >> LOCAL_FLAG_BITS_SHIFT) & LOCAL_FLAG_BITS_MASK;
8875         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8876 }
8877
8878 static int write_vc_local_link_mode(struct hfi1_devdata *dd,
8879                                     u8 misc_bits,
8880                                     u8 flag_bits,
8881                                     u16 link_widths)
8882 {
8883         u32 frame;
8884
8885         frame = (u32)misc_bits << MISC_CONFIG_BITS_SHIFT
8886                 | (u32)flag_bits << LOCAL_FLAG_BITS_SHIFT
8887                 | (u32)link_widths << LINK_WIDTH_SHIFT;
8888         return load_8051_config(dd, VERIFY_CAP_LOCAL_LINK_MODE, GENERAL_CONFIG,
8889                      frame);
8890 }
8891
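/* Write the local device ID and device revision into the 8051 config. */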
8892 static int write_local_device_id(struct hfi1_devdata *dd, u16 device_id,
8893                                  u8 device_rev)
8894 {
8895         u32 frame;
8896
8897         frame = ((u32)device_id << LOCAL_DEVICE_ID_SHIFT)
8898                 | ((u32)device_rev << LOCAL_DEVICE_REV_SHIFT);
8899         return load_8051_config(dd, LOCAL_DEVICE_ID, GENERAL_CONFIG, frame);
8900 }
8901
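/* Read the neighbor's device ID and device revision from the 8051. */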
8902 static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
8903                                   u8 *device_rev)
8904 {
8905         u32 frame;
8906
8907         read_8051_config(dd, REMOTE_DEVICE_ID, GENERAL_CONFIG, &frame);
8908         *device_id = (frame >> REMOTE_DEVICE_ID_SHIFT) & REMOTE_DEVICE_ID_MASK;
8909         *device_rev = (frame >> REMOTE_DEVICE_REV_SHIFT)
8910                         & REMOTE_DEVICE_REV_MASK;
8911 }
8912
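/*
 * Set the host interface version field in the 8051 reserved registers
 * frame, preserving the other bits.
 */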
8913 int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
8914 {
8915         u32 frame;
8916         u32 mask;
8917
8918         mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
8919         read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
8920         /* Clear, then set field */
8921         frame &= ~mask;
8922         frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
8923         return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
8924                                 frame);
8925 }
8926
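/*
 * Read the version reported by the 8051: major and minor from MISC_STATUS,
 * patch level from VERSION_PATCH.
 */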
8927 void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
8928                       u8 *ver_patch)
8929 {
8930         u32 frame;
8931
8932         read_8051_config(dd, MISC_STATUS, GENERAL_CONFIG, &frame);
8933         *ver_major = (frame >> STS_FM_VERSION_MAJOR_SHIFT) &
8934                 STS_FM_VERSION_MAJOR_MASK;
8935         *ver_minor = (frame >> STS_FM_VERSION_MINOR_SHIFT) &
8936                 STS_FM_VERSION_MINOR_MASK;
8937
8938         read_8051_config(dd, VERSION_PATCH, GENERAL_CONFIG, &frame);
8939         *ver_patch = (frame >> STS_FM_VERSION_PATCH_SHIFT) &
8940                 STS_FM_VERSION_PATCH_MASK;
8941 }
8942
8943 static void read_vc_remote_phy(struct hfi1_devdata *dd, u8 *power_management,
8944                                u8 *continuous)
8945 {
8946         u32 frame;
8947
8948         read_8051_config(dd, VERIFY_CAP_REMOTE_PHY, GENERAL_CONFIG, &frame);
8949         *power_management = (frame >> POWER_MANAGEMENT_SHIFT)
8950                                         & POWER_MANAGEMENT_MASK;
8951         *continuous = (frame >> CONTINIOUS_REMOTE_UPDATE_SUPPORT_SHIFT)
8952                                         & CONTINIOUS_REMOTE_UPDATE_SUPPORT_MASK;
8953 }
8954
8955 static void read_vc_remote_fabric(struct hfi1_devdata *dd, u8 *vau, u8 *z,
8956                                   u8 *vcu, u16 *vl15buf, u8 *crc_sizes)
8957 {
8958         u32 frame;
8959
8960         read_8051_config(dd, VERIFY_CAP_REMOTE_FABRIC, GENERAL_CONFIG, &frame);
8961         *vau = (frame >> VAU_SHIFT) & VAU_MASK;
8962         *z = (frame >> Z_SHIFT) & Z_MASK;
8963         *vcu = (frame >> VCU_SHIFT) & VCU_MASK;
8964         *vl15buf = (frame >> VL15BUF_SHIFT) & VL15BUF_MASK;
8965         *crc_sizes = (frame >> CRC_SIZES_SHIFT) & CRC_SIZES_MASK;
8966 }
8967
8968 static void read_vc_remote_link_width(struct hfi1_devdata *dd,
8969                                       u8 *remote_tx_rate,
8970                                       u16 *link_widths)
8971 {
8972         u32 frame;
8973
8974         read_8051_config(dd, VERIFY_CAP_REMOTE_LINK_WIDTH, GENERAL_CONFIG,
8975                          &frame);
8976         *remote_tx_rate = (frame >> REMOTE_TX_RATE_SHIFT)
8977                                 & REMOTE_TX_RATE_MASK;
8978         *link_widths = (frame >> LINK_WIDTH_SHIFT) & LINK_WIDTH_MASK;
8979 }
8980
8981 static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
8982 {
8983         u32 frame;
8984
8985         read_8051_config(dd, LOCAL_LNI_INFO, GENERAL_CONFIG, &frame);
8986         *enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
8987 }
8988
8989 static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
8990 {
8991         read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
8992 }
8993
8994 static void read_last_remote_state(struct hfi1_devdata *dd, u32 *lrs)
8995 {
8996         read_8051_config(dd, LAST_REMOTE_STATE_COMPLETE, GENERAL_CONFIG, lrs);
8997 }
8998
8999 void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality)
9000 {
9001         u32 frame;
9002         int ret;
9003
9004         *link_quality = 0;
9005         if (dd->pport->host_link_state & HLS_UP) {
9006                 ret = read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG,
9007                                        &frame);
9008                 if (ret == 0)
9009                         *link_quality = (frame >> LINK_QUALITY_SHIFT)
9010                                                 & LINK_QUALITY_MASK;
9011         }
9012 }
9013
9014 static void read_planned_down_reason_code(struct hfi1_devdata *dd, u8 *pdrrc)
9015 {
9016         u32 frame;
9017
9018         read_8051_config(dd, LINK_QUALITY_INFO, GENERAL_CONFIG, &frame);
9019         *pdrrc = (frame >> DOWN_REMOTE_REASON_SHIFT) & DOWN_REMOTE_REASON_MASK;
9020 }
9021
9022 static void read_link_down_reason(struct hfi1_devdata *dd, u8 *ldr)
9023 {
9024         u32 frame;
9025
9026         read_8051_config(dd, LINK_DOWN_REASON, GENERAL_CONFIG, &frame);
9027         *ldr = (frame & 0xff);
9028 }
9029
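/*
 * Read the TX_SETTINGS frame: per-lane TX enable, TX/RX polarity
 * inversion, and the maximum TX rate.  Returns the result of the
 * underlying 8051 read.
 */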
9030 static int read_tx_settings(struct hfi1_devdata *dd,
9031                             u8 *enable_lane_tx,
9032                             u8 *tx_polarity_inversion,
9033                             u8 *rx_polarity_inversion,
9034                             u8 *max_rate)
9035 {
9036         u32 frame;
9037         int ret;
9038
9039         ret = read_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, &frame);
9040         *enable_lane_tx = (frame >> ENABLE_LANE_TX_SHIFT)
9041                                 & ENABLE_LANE_TX_MASK;
9042         *tx_polarity_inversion = (frame >> TX_POLARITY_INVERSION_SHIFT)
9043                                 & TX_POLARITY_INVERSION_MASK;
9044         *rx_polarity_inversion = (frame >> RX_POLARITY_INVERSION_SHIFT)
9045                                 & RX_POLARITY_INVERSION_MASK;
9046         *max_rate = (frame >> MAX_RATE_SHIFT) & MAX_RATE_MASK;
9047         return ret;
9048 }
9049
9050 static int write_tx_settings(struct hfi1_devdata *dd,
9051                              u8 enable_lane_tx,
9052                              u8 tx_polarity_inversion,
9053                              u8 rx_polarity_inversion,
9054                              u8 max_rate)
9055 {
9056         u32 frame;
9057
9058         /* no need to mask, all variable sizes match field widths */
9059         frame = enable_lane_tx << ENABLE_LANE_TX_SHIFT
9060                 | tx_polarity_inversion << TX_POLARITY_INVERSION_SHIFT
9061                 | rx_polarity_inversion << RX_POLARITY_INVERSION_SHIFT
9062                 | max_rate << MAX_RATE_SHIFT;
9063         return load_8051_config(dd, TX_SETTINGS, GENERAL_CONFIG, frame);
9064 }
9065
9066 /*
9067  * Read an idle LCB message.
9068  *
9069  * Returns 0 on success, -EINVAL on error
9070  */
9071 static int read_idle_message(struct hfi1_devdata *dd, u64 type, u64 *data_out)
9072 {
9073         int ret;
9074
9075         ret = do_8051_command(dd, HCMD_READ_LCB_IDLE_MSG, type, data_out);
9076         if (ret != HCMD_SUCCESS) {
9077                 dd_dev_err(dd, "read idle message: type %d, err %d\n",
9078                            (u32)type, ret);
9079                 return -EINVAL;
9080         }
9081         dd_dev_info(dd, "%s: read idle message 0x%llx\n", __func__, *data_out);
9082         /* return only the payload as we already know the type */
9083         *data_out >>= IDLE_PAYLOAD_SHIFT;
9084         return 0;
9085 }
9086
9087 /*
9088  * Read an idle SMA message.  To be done in response to a notification from
9089  * the 8051.
9090  *
9091  * Returns 0 on success, -EINVAL on error
9092  */
9093 static int read_idle_sma(struct hfi1_devdata *dd, u64 *data)
9094 {
9095         return read_idle_message(dd, (u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT,
9096                                  data);
9097 }
9098
9099 /*
9100  * Send an idle LCB message.
9101  *
9102  * Returns 0 on success, -EINVAL on error
9103  */
9104 static int send_idle_message(struct hfi1_devdata *dd, u64 data)
9105 {
9106         int ret;
9107
9108         dd_dev_info(dd, "%s: sending idle message 0x%llx\n", __func__, data);
9109         ret = do_8051_command(dd, HCMD_SEND_LCB_IDLE_MSG, data, NULL);
9110         if (ret != HCMD_SUCCESS) {
9111                 dd_dev_err(dd, "send idle message: data 0x%llx, err %d\n",
9112                            data, ret);
9113                 return -EINVAL;
9114         }
9115         return 0;
9116 }
9117
9118 /*
9119  * Send an idle SMA message.
9120  *
9121  * Returns 0 on success, -EINVAL on error
9122  */
9123 int send_idle_sma(struct hfi1_devdata *dd, u64 message)
9124 {
9125         u64 data;
9126
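        /*
         * Frame layout: the SMA payload sits at IDLE_PAYLOAD_SHIFT and the
         * IDLE_SMA message type at IDLE_MSG_TYPE_SHIFT; the receive path
         * (read_idle_message()) strips the type and returns only the payload.
         */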
9127         data = ((message & IDLE_PAYLOAD_MASK) << IDLE_PAYLOAD_SHIFT) |
9128                 ((u64)IDLE_SMA << IDLE_MSG_TYPE_SHIFT);
9129         return send_idle_message(dd, data);
9130 }
9131
9132 /*
9133  * Initialize the LCB then do a quick link up.  This may or may not be
9134  * in loopback.
9135  *
9136  * return 0 on success, -errno on error
9137  */
9138 static int do_quick_linkup(struct hfi1_devdata *dd)
9139 {
9140         int ret;
9141
9142         lcb_shutdown(dd, 0);
9143
9144         if (loopback) {
9145                 /* LCB_CFG_LOOPBACK.VAL = 2 */
9146                 /* LCB_CFG_LANE_WIDTH.VAL = 0 */
9147                 write_csr(dd, DC_LCB_CFG_LOOPBACK,
9148                           IB_PACKET_TYPE << DC_LCB_CFG_LOOPBACK_VAL_SHIFT);
9149                 write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
9150         }
9151
9152         /* start the LCBs */
9153         /* LCB_CFG_TX_FIFOS_RESET.VAL = 0 */
9154         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
9155
9156         /* simulator only loopback steps */
9157         if (loopback && dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
9158                 /* LCB_CFG_RUN.EN = 1 */
9159                 write_csr(dd, DC_LCB_CFG_RUN,
9160                           1ull << DC_LCB_CFG_RUN_EN_SHIFT);
9161
9162                 ret = wait_link_transfer_active(dd, 10);
9163                 if (ret)
9164                         return ret;
9165
9166                 write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP,
9167                           1ull << DC_LCB_CFG_ALLOW_LINK_UP_VAL_SHIFT);
9168         }
9169
9170         if (!loopback) {
9171                 /*
9172                  * When doing quick linkup and not in loopback, both
9173                  * sides must be done with LCB set-up before either
9174                  * starts the quick linkup.  Put a delay here so that
9175                  * both sides can be started and have a chance to be
9176                  * done with LCB set up before resuming.
9177                  */
9178                 dd_dev_err(dd,
9179                            "Pausing for peer to be finished with LCB set up\n");
9180                 msleep(5000);
9181                 dd_dev_err(dd, "Continuing with quick linkup\n");
9182         }
9183
9184         write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
9185         set_8051_lcb_access(dd);
9186
9187         /*
9188          * State "quick" LinkUp request sets the physical link state to
9189          * LinkUp without a verify capability sequence.
9190          * This state is in simulator v37 and later.
9191          */
9192         ret = set_physical_link_state(dd, PLS_QUICK_LINKUP);
9193         if (ret != HCMD_SUCCESS) {
9194                 dd_dev_err(dd,
9195                            "%s: set physical link state to quick LinkUp failed with return %d\n",
9196                            __func__, ret);
9197
9198                 set_host_lcb_access(dd);
9199                 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
9200
9201                 if (ret >= 0)
9202                         ret = -EINVAL;
9203                 return ret;
9204         }
9205
9206         return 0; /* success */
9207 }
9208
9209 /*
9210  * Do all special steps to set up loopback.
9211  */
9212 static int init_loopback(struct hfi1_devdata *dd)
9213 {
9214         dd_dev_info(dd, "Entering loopback mode\n");
9215
9216         /* all loopbacks should disable self GUID check */
9217         write_csr(dd, DC_DC8051_CFG_MODE,
9218                   (read_csr(dd, DC_DC8051_CFG_MODE) | DISABLE_SELF_GUID_CHECK));
9219
9220         /*
9221          * The simulator has only one loopback option - LCB.  Switch
9222          * to that option, which includes quick link up.
9223          *
9224          * Accept all valid loopback values.
9225          */
9226         if ((dd->icode == ICODE_FUNCTIONAL_SIMULATOR) &&
9227             (loopback == LOOPBACK_SERDES || loopback == LOOPBACK_LCB ||
9228              loopback == LOOPBACK_CABLE)) {
9229                 loopback = LOOPBACK_LCB;
9230                 quick_linkup = 1;
9231                 return 0;
9232         }
9233
9234         /*
9235          * SerDes loopback init sequence is handled in set_local_link_attributes
9236          */
9237         if (loopback == LOOPBACK_SERDES)
9238                 return 0;
9239
9240         /* LCB loopback - handled at poll time */
9241         if (loopback == LOOPBACK_LCB) {
9242                 quick_linkup = 1; /* LCB is always quick linkup */
9243
9244                 /* not supported in emulation due to emulation RTL changes */
9245                 if (dd->icode == ICODE_FPGA_EMULATION) {
9246                         dd_dev_err(dd,
9247                                    "LCB loopback not supported in emulation\n");
9248                         return -EINVAL;
9249                 }
9250                 return 0;
9251         }
9252
9253         /* external cable loopback requires no extra steps */
9254         if (loopback == LOOPBACK_CABLE)
9255                 return 0;
9256
9257         dd_dev_err(dd, "Invalid loopback mode %d\n", loopback);
9258         return -EINVAL;
9259 }
9260
9261 /*
9262  * Translate from the OPA_LINK_WIDTH handed to us by the FM to bits
9263  * used in the Verify Capability link width attribute.
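 *
 * For example, OPA_LINK_WIDTH_1X maps to bit 0 and OPA_LINK_WIDTH_4X to
 * bit 3, so an enabled-width mask of 1X|4X translates to 0x9.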
9264  */
9265 static u16 opa_to_vc_link_widths(u16 opa_widths)
9266 {
9267         int i;
9268         u16 result = 0;
9269
9270         static const struct link_bits {
9271                 u16 from;
9272                 u16 to;
9273         } opa_link_xlate[] = {
9274                 { OPA_LINK_WIDTH_1X, 1 << (1 - 1)  },
9275                 { OPA_LINK_WIDTH_2X, 1 << (2 - 1)  },
9276                 { OPA_LINK_WIDTH_3X, 1 << (3 - 1)  },
9277                 { OPA_LINK_WIDTH_4X, 1 << (4 - 1)  },
9278         };
9279
9280         for (i = 0; i < ARRAY_SIZE(opa_link_xlate); i++) {
9281                 if (opa_widths & opa_link_xlate[i].from)
9282                         result |= opa_link_xlate[i].to;
9283         }
9284         return result;
9285 }
9286
9287 /*
9288  * Set link attributes before moving to polling.
9289  */
9290 static int set_local_link_attributes(struct hfi1_pportdata *ppd)
9291 {
9292         struct hfi1_devdata *dd = ppd->dd;
9293         u8 enable_lane_tx;
9294         u8 tx_polarity_inversion;
9295         u8 rx_polarity_inversion;
9296         int ret;
9297         u32 misc_bits = 0;
9298         /* reset our fabric serdes to clear any lingering problems */
9299         fabric_serdes_reset(dd);
9300
9301         /* set the local tx rate - need to read-modify-write */
9302         ret = read_tx_settings(dd, &enable_lane_tx, &tx_polarity_inversion,
9303                                &rx_polarity_inversion, &ppd->local_tx_rate);
9304         if (ret)
9305                 goto set_local_link_attributes_fail;
9306
9307         if (dd->dc8051_ver < dc8051_ver(0, 20, 0)) {
9308                 /* set the tx rate to the fastest enabled */
9309                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9310                         ppd->local_tx_rate = 1;
9311                 else
9312                         ppd->local_tx_rate = 0;
9313         } else {
9314                 /* set the tx rate to all enabled */
9315                 ppd->local_tx_rate = 0;
9316                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_25G)
9317                         ppd->local_tx_rate |= 2;
9318                 if (ppd->link_speed_enabled & OPA_LINK_SPEED_12_5G)
9319                         ppd->local_tx_rate |= 1;
9320         }
9321
9322         enable_lane_tx = 0xF; /* enable all four lanes */
9323         ret = write_tx_settings(dd, enable_lane_tx, tx_polarity_inversion,
9324                                 rx_polarity_inversion, ppd->local_tx_rate);
9325         if (ret != HCMD_SUCCESS)
9326                 goto set_local_link_attributes_fail;
9327
9328         ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
9329         if (ret != HCMD_SUCCESS) {
9330                 dd_dev_err(dd,
9331                            "Failed to set host interface version, return 0x%x\n",
9332                            ret);
9333                 goto set_local_link_attributes_fail;
9334         }
9335
9336         /*
9337          * DC supports continuous updates.
9338          */
9339         ret = write_vc_local_phy(dd,
9340                                  0 /* no power management */,
9341                                  1 /* continuous updates */);
9342         if (ret != HCMD_SUCCESS)
9343                 goto set_local_link_attributes_fail;
9344
9345         /* z=1 in the next call: AU of 0 is not supported by the hardware */
9346         ret = write_vc_local_fabric(dd, dd->vau, 1, dd->vcu, dd->vl15_init,
9347                                     ppd->port_crc_mode_enabled);
9348         if (ret != HCMD_SUCCESS)
9349                 goto set_local_link_attributes_fail;
9350
9351         /*
9352          * SerDes loopback init sequence requires
9353          * setting bit 0 of MISC_CONFIG_BITS
9354          */
9355         if (loopback == LOOPBACK_SERDES)
9356                 misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
9357
9358         /*
9359          * An external device configuration request is used to reset the LCB
9360          * so the LCB can retry to obtain operational lanes when the first
9361          * attempt is unsuccessful.
9362          */
9363         if (dd->dc8051_ver >= dc8051_ver(1, 25, 0))
9364                 misc_bits |= 1 << EXT_CFG_LCB_RESET_SUPPORTED_SHIFT;
9365
9366         ret = write_vc_local_link_mode(dd, misc_bits, 0,
9367                                        opa_to_vc_link_widths(
9368                                                 ppd->link_width_enabled));
9369         if (ret != HCMD_SUCCESS)
9370                 goto set_local_link_attributes_fail;
9371
9372         /* let peer know who we are */
9373         ret = write_local_device_id(dd, dd->pcidev->device, dd->minrev);
9374         if (ret == HCMD_SUCCESS)
9375                 return 0;
9376
9377 set_local_link_attributes_fail:
9378         dd_dev_err(dd,
9379                    "Failed to set local link attributes, return 0x%x\n",
9380                    ret);
9381         return ret;
9382 }
9383
9384 /*
9385  * Call this to start the link.
9386  * Do not do anything if the link is disabled.
9387  * Returns 0 if link is disabled, moved to polling, or the driver is not ready.
9388  */
9389 int start_link(struct hfi1_pportdata *ppd)
9390 {
9391         /*
9392          * Tune the SerDes to a ballpark setting for optimal signal and bit
9393          * error rate.  Needs to be done before starting the link.
9394          */
9395         tune_serdes(ppd);
9396
9397         if (!ppd->driver_link_ready) {
9398                 dd_dev_info(ppd->dd,
9399                             "%s: stopping link start because driver is not ready\n",
9400                             __func__);
9401                 return 0;
9402         }
9403
9404         /*
9405          * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
9406          * pkey table can be configured properly if the HFI unit is connected
9407          * to switch port with MgmtAllowed=NO
9408          */
9409         clear_full_mgmt_pkey(ppd);
9410
9411         return set_link_state(ppd, HLS_DN_POLL);
9412 }
9413
9414 static void wait_for_qsfp_init(struct hfi1_pportdata *ppd)
9415 {
9416         struct hfi1_devdata *dd = ppd->dd;
9417         u64 mask;
9418         unsigned long timeout;
9419
9420         /*
9421          * Some QSFP cables have a quirk that asserts the IntN line as a side
9422          * effect of power up on plug-in. We ignore this false positive
9423          * interrupt until the module has finished powering up by waiting for
9424          * a minimum timeout of the module inrush initialization time of
9425          * 500 ms (SFF 8679 Table 5-6) to ensure the voltage rails in the
9426          * module have stabilized.
9427          */
9428         msleep(500);
9429
9430         /*
9431          * Check for QSFP interrupt for t_init (SFF 8679 Table 8-1)
9432          */
9433         timeout = jiffies + msecs_to_jiffies(2000);
9434         while (1) {
9435                 mask = read_csr(dd, dd->hfi1_id ?
9436                                 ASIC_QSFP2_IN : ASIC_QSFP1_IN);
9437                 if (!(mask & QSFP_HFI0_INT_N))
9438                         break;
9439                 if (time_after(jiffies, timeout)) {
9440                         dd_dev_info(dd, "%s: No IntN detected, reset complete\n",
9441                                     __func__);
9442                         break;
9443                 }
9444                 udelay(2);
9445         }
9446 }
9447
9448 static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9449 {
9450         struct hfi1_devdata *dd = ppd->dd;
9451         u64 mask;
9452
9453         mask = read_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK);
9454         if (enable) {
9455                 /*
9456                  * Clear the status register to avoid an immediate interrupt
9457                  * when we re-enable the IntN pin
9458                  */
9459                 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9460                           QSFP_HFI0_INT_N);
9461                 mask |= (u64)QSFP_HFI0_INT_N;
9462         } else {
9463                 mask &= ~(u64)QSFP_HFI0_INT_N;
9464         }
9465         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9466 }
9467
9468 int reset_qsfp(struct hfi1_pportdata *ppd)
9469 {
9470         struct hfi1_devdata *dd = ppd->dd;
9471         u64 mask, qsfp_mask;
9472
9473         /* Disable INT_N from triggering QSFP interrupts */
9474         set_qsfp_int_n(ppd, 0);
9475
9476         /* Reset the QSFP */
9477         mask = (u64)QSFP_HFI0_RESET_N;
9478
9479         qsfp_mask = read_csr(dd,
9480                              dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT);
9481         qsfp_mask &= ~mask;
9482         write_csr(dd,
9483                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9484
9485         udelay(10);
9486
9487         qsfp_mask |= mask;
9488         write_csr(dd,
9489                   dd->hfi1_id ? ASIC_QSFP2_OUT : ASIC_QSFP1_OUT, qsfp_mask);
9490
9491         wait_for_qsfp_init(ppd);
9492
9493         /*
9494          * Allow INT_N to trigger the QSFP interrupt to watch
9495          * for alarms and warnings
9496          */
9497         set_qsfp_int_n(ppd, 1);
9498
9499         /*
9500          * After the reset, AOC transmitters are enabled by default. They need
9501          * to be turned off to complete the QSFP setup before they can be
9502          * enabled again.
9503          */
9504         return set_qsfp_tx(ppd, 0);
9505 }
9506
9507 static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
9508                                         u8 *qsfp_interrupt_status)
9509 {
9510         struct hfi1_devdata *dd = ppd->dd;
9511
9512         if ((qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_ALARM) ||
9513             (qsfp_interrupt_status[0] & QSFP_HIGH_TEMP_WARNING))
9514                 dd_dev_err(dd, "%s: QSFP cable temperature too high\n",
9515                            __func__);
9516
9517         if ((qsfp_interrupt_status[0] & QSFP_LOW_TEMP_ALARM) ||
9518             (qsfp_interrupt_status[0] & QSFP_LOW_TEMP_WARNING))
9519                 dd_dev_err(dd, "%s: QSFP cable temperature too low\n",
9520                            __func__);
9521
9522         /*
9523          * The remaining alarms/warnings don't matter if the link is down.
9524          */
9525         if (ppd->host_link_state & HLS_DOWN)
9526                 return 0;
9527
9528         if ((qsfp_interrupt_status[1] & QSFP_HIGH_VCC_ALARM) ||
9529             (qsfp_interrupt_status[1] & QSFP_HIGH_VCC_WARNING))
9530                 dd_dev_err(dd, "%s: QSFP supply voltage too high\n",
9531                            __func__);
9532
9533         if ((qsfp_interrupt_status[1] & QSFP_LOW_VCC_ALARM) ||
9534             (qsfp_interrupt_status[1] & QSFP_LOW_VCC_WARNING))
9535                 dd_dev_err(dd, "%s: QSFP supply voltage too low\n",
9536                            __func__);
9537
9538         /* Byte 2 is vendor specific */
9539
9540         if ((qsfp_interrupt_status[3] & QSFP_HIGH_POWER_ALARM) ||
9541             (qsfp_interrupt_status[3] & QSFP_HIGH_POWER_WARNING))
9542                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too high\n",
9543                            __func__);
9544
9545         if ((qsfp_interrupt_status[3] & QSFP_LOW_POWER_ALARM) ||
9546             (qsfp_interrupt_status[3] & QSFP_LOW_POWER_WARNING))
9547                 dd_dev_err(dd, "%s: Cable RX channel 1/2 power too low\n",
9548                            __func__);
9549
9550         if ((qsfp_interrupt_status[4] & QSFP_HIGH_POWER_ALARM) ||
9551             (qsfp_interrupt_status[4] & QSFP_HIGH_POWER_WARNING))
9552                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too high\n",
9553                            __func__);
9554
9555         if ((qsfp_interrupt_status[4] & QSFP_LOW_POWER_ALARM) ||
9556             (qsfp_interrupt_status[4] & QSFP_LOW_POWER_WARNING))
9557                 dd_dev_err(dd, "%s: Cable RX channel 3/4 power too low\n",
9558                            __func__);
9559
9560         if ((qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_ALARM) ||
9561             (qsfp_interrupt_status[5] & QSFP_HIGH_BIAS_WARNING))
9562                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too high\n",
9563                            __func__);
9564
9565         if ((qsfp_interrupt_status[5] & QSFP_LOW_BIAS_ALARM) ||
9566             (qsfp_interrupt_status[5] & QSFP_LOW_BIAS_WARNING))
9567                 dd_dev_err(dd, "%s: Cable TX channel 1/2 bias too low\n",
9568                            __func__);
9569
9570         if ((qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_ALARM) ||
9571             (qsfp_interrupt_status[6] & QSFP_HIGH_BIAS_WARNING))
9572                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too high\n",
9573                            __func__);
9574
9575         if ((qsfp_interrupt_status[6] & QSFP_LOW_BIAS_ALARM) ||
9576             (qsfp_interrupt_status[6] & QSFP_LOW_BIAS_WARNING))
9577                 dd_dev_err(dd, "%s: Cable TX channel 3/4 bias too low\n",
9578                            __func__);
9579
9580         if ((qsfp_interrupt_status[7] & QSFP_HIGH_POWER_ALARM) ||
9581             (qsfp_interrupt_status[7] & QSFP_HIGH_POWER_WARNING))
9582                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too high\n",
9583                            __func__);
9584
9585         if ((qsfp_interrupt_status[7] & QSFP_LOW_POWER_ALARM) ||
9586             (qsfp_interrupt_status[7] & QSFP_LOW_POWER_WARNING))
9587                 dd_dev_err(dd, "%s: Cable TX channel 1/2 power too low\n",
9588                            __func__);
9589
9590         if ((qsfp_interrupt_status[8] & QSFP_HIGH_POWER_ALARM) ||
9591             (qsfp_interrupt_status[8] & QSFP_HIGH_POWER_WARNING))
9592                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too high\n",
9593                            __func__);
9594
9595         if ((qsfp_interrupt_status[8] & QSFP_LOW_POWER_ALARM) ||
9596             (qsfp_interrupt_status[8] & QSFP_LOW_POWER_WARNING))
9597                 dd_dev_err(dd, "%s: Cable TX channel 3/4 power too low\n",
9598                            __func__);
9599
9600         /* Bytes 9-10 and 11-12 are reserved */
9601         /* Bytes 13-15 are vendor specific */
9602
9603         return 0;
9604 }
9605
9606 /* This routine is only scheduled if the QSFP module-present signal is asserted */
9607 void qsfp_event(struct work_struct *work)
9608 {
9609         struct qsfp_data *qd;
9610         struct hfi1_pportdata *ppd;
9611         struct hfi1_devdata *dd;
9612
9613         qd = container_of(work, struct qsfp_data, qsfp_work);
9614         ppd = qd->ppd;
9615         dd = ppd->dd;
9616
9617         /* Sanity check */
9618         if (!qsfp_mod_present(ppd))
9619                 return;
9620
9621         if (ppd->host_link_state == HLS_DN_DISABLE) {
9622                 dd_dev_info(ppd->dd,
9623                             "%s: stopping link start because link is disabled\n",
9624                             __func__);
9625                 return;
9626         }
9627
9628         /*
9629          * Turn DC back on after cable has been re-inserted. Up until
9630          * now, the DC has been in reset to save power.
9631          */
9632         dc_start(dd);
9633
9634         if (qd->cache_refresh_required) {
9635                 set_qsfp_int_n(ppd, 0);
9636
9637                 wait_for_qsfp_init(ppd);
9638
9639                 /*
9640                  * Allow INT_N to trigger the QSFP interrupt to watch
9641                  * for alarms and warnings
9642                  */
9643                 set_qsfp_int_n(ppd, 1);
9644
9645                 start_link(ppd);
9646         }
9647
9648         if (qd->check_interrupt_flags) {
9649                 u8 qsfp_interrupt_status[16] = {0,};
9650
9651                 if (one_qsfp_read(ppd, dd->hfi1_id, 6,
9652                                   &qsfp_interrupt_status[0], 16) != 16) {
9653                         dd_dev_info(dd,
9654                                     "%s: Failed to read status of QSFP module\n",
9655                                     __func__);
9656                 } else {
9657                         unsigned long flags;
9658
9659                         handle_qsfp_error_conditions(
9660                                         ppd, qsfp_interrupt_status);
9661                         spin_lock_irqsave(&ppd->qsfp_info.qsfp_lock, flags);
9662                         ppd->qsfp_info.check_interrupt_flags = 0;
9663                         spin_unlock_irqrestore(&ppd->qsfp_info.qsfp_lock,
9664                                                flags);
9665                 }
9666         }
9667 }
9668
9669 void init_qsfp_int(struct hfi1_devdata *dd)
9670 {
9671         struct hfi1_pportdata *ppd = dd->pport;
9672         u64 qsfp_mask;
9673
9674         qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
9675         /* Clear current status to avoid spurious interrupts */
9676         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_CLEAR : ASIC_QSFP1_CLEAR,
9677                   qsfp_mask);
9678         write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK,
9679                   qsfp_mask);
9680
9681         set_qsfp_int_n(ppd, 0);
9682
9683         /* Handle active low nature of INT_N and MODPRST_N pins */
9684         if (qsfp_mod_present(ppd))
9685                 qsfp_mask &= ~(u64)QSFP_HFI0_MODPRST_N;
9686         write_csr(dd,
9687                   dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
9688                   qsfp_mask);
9689
9690         /* Enable the appropriate QSFP IRQ source */
9691         if (!dd->hfi1_id)
9692                 set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
9693         else
9694                 set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
9695 }
9696
9697 /*
9698  * Do a one-time initialize of the LCB block.
9699  */
9700 static void init_lcb(struct hfi1_devdata *dd)
9701 {
9702         /* simulator does not correctly handle LCB cclk loopback, skip */
9703         if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
9704                 return;
9705
9706         /* the DC has been reset earlier in the driver load */
9707
9708         /* set LCB for cclk loopback on the port */
9709         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x01);
9710         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0x00);
9711         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0x00);
9712         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
9713         write_csr(dd, DC_LCB_CFG_CLK_CNTR, 0x08);
9714         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x02);
9715         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0x00);
9716 }
9717
9718 /*
9719  * Perform a test read on the QSFP.  Return 0 on success, -ERRNO
9720  * on error.
9721  */
9722 static int test_qsfp_read(struct hfi1_pportdata *ppd)
9723 {
9724         int ret;
9725         u8 status;
9726
9727         /*
9728          * Report success if the port is not a QSFP, or if it is a QSFP but
9729          * the cable is not present.
9730          */
9731         if (ppd->port_type != PORT_TYPE_QSFP || !qsfp_mod_present(ppd))
9732                 return 0;
9733
9734         /* read byte 2, the status byte */
9735         ret = one_qsfp_read(ppd, ppd->dd->hfi1_id, 2, &status, 1);
9736         if (ret < 0)
9737                 return ret;
9738         if (ret != 1)
9739                 return -EIO;
9740
9741         return 0; /* success */
9742 }
9743
9744 /*
9745  * Values for QSFP retry.
9746  *
9747  * Give up after 10s (20 x 500ms).  The overall timeout was empirically
9748  * arrived at from experience on a large cluster.
9749  */
9750 #define MAX_QSFP_RETRIES 20
9751 #define QSFP_RETRY_WAIT 500 /* msec */
9752
9753 /*
9754  * Try a QSFP read.  If it fails, schedule a retry for later.
9755  * Called on first link activation after driver load.
9756  */
9757 static void try_start_link(struct hfi1_pportdata *ppd)
9758 {
9759         if (test_qsfp_read(ppd)) {
9760                 /* read failed */
9761                 if (ppd->qsfp_retry_count >= MAX_QSFP_RETRIES) {
9762                         dd_dev_err(ppd->dd, "QSFP not responding, giving up\n");
9763                         return;
9764                 }
9765                 dd_dev_info(ppd->dd,
9766                             "QSFP not responding, waiting and retrying %d\n",
9767                             (int)ppd->qsfp_retry_count);
9768                 ppd->qsfp_retry_count++;
9769                 queue_delayed_work(ppd->link_wq, &ppd->start_link_work,
9770                                    msecs_to_jiffies(QSFP_RETRY_WAIT));
9771                 return;
9772         }
9773         ppd->qsfp_retry_count = 0;
9774
9775         start_link(ppd);
9776 }
9777
9778 /*
9779  * Workqueue function to start the link after a delay.
9780  */
9781 void handle_start_link(struct work_struct *work)
9782 {
9783         struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
9784                                                   start_link_work.work);
9785         try_start_link(ppd);
9786 }
9787
9788 int bringup_serdes(struct hfi1_pportdata *ppd)
9789 {
9790         struct hfi1_devdata *dd = ppd->dd;
9791         u64 guid;
9792         int ret;
9793
9794         if (HFI1_CAP_IS_KSET(EXTENDED_PSN))
9795                 add_rcvctrl(dd, RCV_CTRL_RCV_EXTENDED_PSN_ENABLE_SMASK);
9796
9797         guid = ppd->guids[HFI1_PORT_GUID_INDEX];
9798         if (!guid) {
9799                 if (dd->base_guid)
9800                         guid = dd->base_guid + ppd->port - 1;
9801                 ppd->guids[HFI1_PORT_GUID_INDEX] = guid;
9802         }
9803
9804         /* Set linkinit_reason on power up per OPA spec */
9805         ppd->linkinit_reason = OPA_LINKINIT_REASON_LINKUP;
9806
9807         /* one-time init of the LCB */
9808         init_lcb(dd);
9809
9810         if (loopback) {
9811                 ret = init_loopback(dd);
9812                 if (ret < 0)
9813                         return ret;
9814         }
9815
9816         get_port_type(ppd);
9817         if (ppd->port_type == PORT_TYPE_QSFP) {
9818                 set_qsfp_int_n(ppd, 0);
9819                 wait_for_qsfp_init(ppd);
9820                 set_qsfp_int_n(ppd, 1);
9821         }
9822
9823         try_start_link(ppd);
9824         return 0;
9825 }
9826
9827 void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
9828 {
9829         struct hfi1_devdata *dd = ppd->dd;
9830
9831         /*
9832          * Shut down the link and keep it down.  First clear the flag that
9833          * says the driver wants to allow the link to be up (driver_link_ready).
9834          * Then make sure the link is not automatically restarted
9835          * (link_enabled).  Cancel any pending restart.  And finally
9836          * go offline.
9837          */
9838         ppd->driver_link_ready = 0;
9839         ppd->link_enabled = 0;
9840
9841         ppd->qsfp_retry_count = MAX_QSFP_RETRIES; /* prevent more retries */
9842         flush_delayed_work(&ppd->start_link_work);
9843         cancel_delayed_work_sync(&ppd->start_link_work);
9844
9845         ppd->offline_disabled_reason =
9846                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
9847         set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
9848                              OPA_LINKDOWN_REASON_REBOOT);
9849         set_link_state(ppd, HLS_DN_OFFLINE);
9850
9851         /* disable the port */
9852         clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
9853         cancel_work_sync(&ppd->freeze_work);
9854 }
9855
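/*
 * Allocate the per-CPU RC ACK/QACK/delayed-completion counters for each
 * port.  Returns -ENOMEM if any of the allocations fail.
 */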
9856 static inline int init_cpu_counters(struct hfi1_devdata *dd)
9857 {
9858         struct hfi1_pportdata *ppd;
9859         int i;
9860
9861         ppd = (struct hfi1_pportdata *)(dd + 1);
9862         for (i = 0; i < dd->num_pports; i++, ppd++) {
9863                 ppd->ibport_data.rvp.rc_acks = NULL;
9864                 ppd->ibport_data.rvp.rc_qacks = NULL;
9865                 ppd->ibport_data.rvp.rc_acks = alloc_percpu(u64);
9866                 ppd->ibport_data.rvp.rc_qacks = alloc_percpu(u64);
9867                 ppd->ibport_data.rvp.rc_delayed_comp = alloc_percpu(u64);
9868                 if (!ppd->ibport_data.rvp.rc_acks ||
9869                     !ppd->ibport_data.rvp.rc_delayed_comp ||
9870                     !ppd->ibport_data.rvp.rc_qacks)
9871                         return -ENOMEM;
9872         }
9873
9874         return 0;
9875 }
9876
9877 /*
9878  * index is the index into the receive array
9879  */
9880 void hfi1_put_tid(struct hfi1_devdata *dd, u32 index,
9881                   u32 type, unsigned long pa, u16 order)
9882 {
9883         u64 reg;
9884
9885         if (!(dd->flags & HFI1_PRESENT))
9886                 goto done;
9887
9888         if (type == PT_INVALID || type == PT_INVALID_FLUSH) {
9889                 pa = 0;
9890                 order = 0;
9891         } else if (type > PT_INVALID) {
9892                 dd_dev_err(dd,
9893                            "unexpected receive array type %u for index %u, not handled\n",
9894                            type, index);
9895                 goto done;
9896         }
9897         trace_hfi1_put_tid(dd, index, type, pa, order);
9898
9899 #define RT_ADDR_SHIFT 12        /* 4KB kernel address boundary */
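        /*
         * Build the RcvArray entry: write enable, buffer size (order),
         * and the physical address of the buffer in 4KB units.
         */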
9900         reg = RCV_ARRAY_RT_WRITE_ENABLE_SMASK
9901                 | (u64)order << RCV_ARRAY_RT_BUF_SIZE_SHIFT
9902                 | ((pa >> RT_ADDR_SHIFT) & RCV_ARRAY_RT_ADDR_MASK)
9903                                         << RCV_ARRAY_RT_ADDR_SHIFT;
9904         trace_hfi1_write_rcvarray(dd->rcvarray_wc + (index * 8), reg);
9905         writeq(reg, dd->rcvarray_wc + (index * 8));
9906
9907         if (type == PT_EAGER || type == PT_INVALID_FLUSH || (index & 3) == 3)
9908                 /*
9909                  * Eager entries are written and flushed
9910                  *
9911                  * Expected entries are flushed every 4 writes
9912                  */
9913                 flush_wc();
9914 done:
9915         return;
9916 }
9917
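/*
 * Invalidate every eager and expected RcvArray entry owned by the given
 * receive context.
 */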
9918 void hfi1_clear_tids(struct hfi1_ctxtdata *rcd)
9919 {
9920         struct hfi1_devdata *dd = rcd->dd;
9921         u32 i;
9922
9923         /* this could be optimized */
9924         for (i = rcd->eager_base; i < rcd->eager_base +
9925                      rcd->egrbufs.alloced; i++)
9926                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9927
9928         for (i = rcd->expected_base;
9929                         i < rcd->expected_base + rcd->expected_count; i++)
9930                 hfi1_put_tid(dd, i, PT_INVALID, 0, 0);
9931 }
9932
9933 static const char * const ib_cfg_name_strings[] = {
9934         "HFI1_IB_CFG_LIDLMC",
9935         "HFI1_IB_CFG_LWID_DG_ENB",
9936         "HFI1_IB_CFG_LWID_ENB",
9937         "HFI1_IB_CFG_LWID",
9938         "HFI1_IB_CFG_SPD_ENB",
9939         "HFI1_IB_CFG_SPD",
9940         "HFI1_IB_CFG_RXPOL_ENB",
9941         "HFI1_IB_CFG_LREV_ENB",
9942         "HFI1_IB_CFG_LINKLATENCY",
9943         "HFI1_IB_CFG_HRTBT",
9944         "HFI1_IB_CFG_OP_VLS",
9945         "HFI1_IB_CFG_VL_HIGH_CAP",
9946         "HFI1_IB_CFG_VL_LOW_CAP",
9947         "HFI1_IB_CFG_OVERRUN_THRESH",
9948         "HFI1_IB_CFG_PHYERR_THRESH",
9949         "HFI1_IB_CFG_LINKDEFAULT",
9950         "HFI1_IB_CFG_PKEYS",
9951         "HFI1_IB_CFG_MTU",
9952         "HFI1_IB_CFG_LSTATE",
9953         "HFI1_IB_CFG_VL_HIGH_LIMIT",
9954         "HFI1_IB_CFG_PMA_TICKS",
9955         "HFI1_IB_CFG_PORT"
9956 };
9957
9958 static const char *ib_cfg_name(int which)
9959 {
9960         if (which < 0 || which >= ARRAY_SIZE(ib_cfg_name_strings))
9961                 return "invalid";
9962         return ib_cfg_name_strings[which];
9963 }
9964
9965 int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
9966 {
9967         struct hfi1_devdata *dd = ppd->dd;
9968         int val = 0;
9969
9970         switch (which) {
9971         case HFI1_IB_CFG_LWID_ENB: /* allowed Link-width */
9972                 val = ppd->link_width_enabled;
9973                 break;
9974         case HFI1_IB_CFG_LWID: /* currently active Link-width */
9975                 val = ppd->link_width_active;
9976                 break;
9977         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
9978                 val = ppd->link_speed_enabled;
9979                 break;
9980         case HFI1_IB_CFG_SPD: /* current Link speed */
9981                 val = ppd->link_speed_active;
9982                 break;
9983
9984         case HFI1_IB_CFG_RXPOL_ENB: /* Auto-RX-polarity enable */
9985         case HFI1_IB_CFG_LREV_ENB: /* Auto-Lane-reversal enable */
9986         case HFI1_IB_CFG_LINKLATENCY:
9987                 goto unimplemented;
9988
9989         case HFI1_IB_CFG_OP_VLS:
9990                 val = ppd->actual_vls_operational;
9991                 break;
9992         case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
9993                 val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
9994                 break;
9995         case HFI1_IB_CFG_VL_LOW_CAP: /* VL arb low priority table size */
9996                 val = VL_ARB_LOW_PRIO_TABLE_SIZE;
9997                 break;
9998         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
9999                 val = ppd->overrun_threshold;
10000                 break;
10001         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10002                 val = ppd->phy_error_threshold;
10003                 break;
10004         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10005                 val = HLS_DEFAULT;
10006                 break;
10007
10008         case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
10009         case HFI1_IB_CFG_PMA_TICKS:
10010         default:
10011 unimplemented:
10012                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10013                         dd_dev_info(
10014                                 dd,
10015                                 "%s: which %s: not implemented\n",
10016                                 __func__,
10017                                 ib_cfg_name(which));
10018                 break;
10019         }
10020
10021         return val;
10022 }
10023
10024 /*
10025  * The largest MAD packet size.
10026  */
10027 #define MAX_MAD_PACKET 2048
10028
10029 /*
10030  * Return the maximum header bytes that can go on the _wire_
10031  * for this device. This count includes the ICRC which is
10032  * not part of the packet held in memory but it is appended
10033  * by the HW.
10034  * This is dependent on the device's receive header entry size.
10035  * HFI allows this to be set per-receive context, but the
10036  * driver presently enforces a global value.
10037  */
10038 u32 lrh_max_header_bytes(struct hfi1_devdata *dd)
10039 {
10040         /*
10041          * The maximum non-payload (MTU) bytes in LRH.PktLen are
10042          * the Receive Header Entry Size minus the PBC (or RHF) size
10043          * plus one DW for the ICRC appended by HW.
10044          *
10045          * dd->rcd[0].rcvhdrqentsize is in DW.
10046          * We use rcd[0] as all contexts will have the same value. Also,
10047          * the first kernel context would have been allocated by now so
10048          * we are guaranteed a valid value.
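         *
         * For example, a receive header entry size of 32 DW would yield
         * (32 - 2 + 1) * 4 = 124 bytes of maximum wire header.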
10049          */
10050         return (dd->rcd[0]->rcvhdrqentsize - 2/*PBC/RHF*/ + 1/*ICRC*/) << 2;
10051 }
10052
10053 /*
10054  * Set Send Length
10055  * @ppd - per port data
10056  *
10057  * Set the MTU by limiting how many DWs may be sent.  The SendLenCheck*
10058  * registers compare against LRH.PktLen, so use the max bytes included
10059  * in the LRH.
10060  *
10061  * This routine changes all VL values except VL15, which it maintains at
10062  * the same value.
10063  */
10064 static void set_send_length(struct hfi1_pportdata *ppd)
10065 {
10066         struct hfi1_devdata *dd = ppd->dd;
10067         u32 max_hb = lrh_max_header_bytes(dd), dcmtu;
10068         u32 maxvlmtu = dd->vld[15].mtu;
10069         u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
10070                               & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
10071                 SEND_LEN_CHECK1_LEN_VL15_SHIFT;
10072         int i, j;
10073         u32 thres;
10074
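        /*
         * Pack the per-VL maximum lengths: VL0-VL3 accumulate into len1
         * (SEND_LEN_CHECK0) and VL4-VL7 into len2 (SEND_LEN_CHECK1), which
         * was seeded with the VL15 length above.
         */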
10075         for (i = 0; i < ppd->vls_supported; i++) {
10076                 if (dd->vld[i].mtu > maxvlmtu)
10077                         maxvlmtu = dd->vld[i].mtu;
10078                 if (i <= 3)
10079                         len1 |= (((dd->vld[i].mtu + max_hb) >> 2)
10080                                  & SEND_LEN_CHECK0_LEN_VL0_MASK) <<
10081                                 ((i % 4) * SEND_LEN_CHECK0_LEN_VL1_SHIFT);
10082                 else
10083                         len2 |= (((dd->vld[i].mtu + max_hb) >> 2)
10084                                  & SEND_LEN_CHECK1_LEN_VL4_MASK) <<
10085                                 ((i % 4) * SEND_LEN_CHECK1_LEN_VL5_SHIFT);
10086         }
10087         write_csr(dd, SEND_LEN_CHECK0, len1);
10088         write_csr(dd, SEND_LEN_CHECK1, len2);
10089         /* adjust kernel credit return thresholds based on new MTUs */
10090         /* all kernel receive contexts have the same hdrqentsize */
10091         for (i = 0; i < ppd->vls_supported; i++) {
10092                 thres = min(sc_percent_to_threshold(dd->vld[i].sc, 50),
10093                             sc_mtu_to_threshold(dd->vld[i].sc,
10094                                                 dd->vld[i].mtu,
10095                                                 dd->rcd[0]->rcvhdrqentsize));
10096                 for (j = 0; j < INIT_SC_PER_VL; j++)
10097                         sc_set_cr_threshold(
10098                                         pio_select_send_context_vl(dd, j, i),
10099                                             thres);
10100         }
10101         thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
10102                     sc_mtu_to_threshold(dd->vld[15].sc,
10103                                         dd->vld[15].mtu,
10104                                         dd->rcd[0]->rcvhdrqentsize));
10105         sc_set_cr_threshold(dd->vld[15].sc, thres);
10106
10107         /* Adjust maximum MTU for the port in DC */
10108         dcmtu = maxvlmtu == 10240 ? DCC_CFG_PORT_MTU_CAP_10240 :
10109                 (ilog2(maxvlmtu >> 8) + 1);
10110         len1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG);
10111         len1 &= ~DCC_CFG_PORT_CONFIG_MTU_CAP_SMASK;
10112         len1 |= ((u64)dcmtu & DCC_CFG_PORT_CONFIG_MTU_CAP_MASK) <<
10113                 DCC_CFG_PORT_CONFIG_MTU_CAP_SHIFT;
10114         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG, len1);
10115 }
10116
10117 static void set_lidlmc(struct hfi1_pportdata *ppd)
10118 {
10119         int i;
10120         u64 sreg = 0;
10121         struct hfi1_devdata *dd = ppd->dd;
10122         u32 mask = ~((1U << ppd->lmc) - 1);
10123         u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1);
10124         u32 lid;
10125
10126         /*
10127          * Program 0 in CSR if port lid is extended. This prevents
10128          * 9B packets being sent out for large lids.
10129          * 9B packets from being sent out for large lids.
10130         lid = (ppd->lid >= be16_to_cpu(IB_MULTICAST_LID_BASE)) ? 0 : ppd->lid;
10131         c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK
10132                 | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK);
10133         c1 |= ((lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK)
10134                         << DCC_CFG_PORT_CONFIG1_TARGET_DLID_SHIFT) |
10135               ((mask & DCC_CFG_PORT_CONFIG1_DLID_MASK_MASK)
10136                         << DCC_CFG_PORT_CONFIG1_DLID_MASK_SHIFT);
10137         write_csr(ppd->dd, DCC_CFG_PORT_CONFIG1, c1);
10138
10139         /*
10140          * Iterate over all the send contexts and set their SLID check
10141          */
10142         sreg = ((mask & SEND_CTXT_CHECK_SLID_MASK_MASK) <<
10143                         SEND_CTXT_CHECK_SLID_MASK_SHIFT) |
10144                (((lid & mask) & SEND_CTXT_CHECK_SLID_VALUE_MASK) <<
10145                         SEND_CTXT_CHECK_SLID_VALUE_SHIFT);
10146
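        /*
         * Example: with an LMC of 2 the mask clears the low two bits of the
         * SLID check, so all four LIDs assigned to the port pass.
         */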
10147         for (i = 0; i < chip_send_contexts(dd); i++) {
10148                 hfi1_cdbg(LINKVERB, "SendContext[%d].SLID_CHECK = 0x%x",
10149                           i, (u32)sreg);
10150                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, sreg);
10151         }
10152
10153         /* Now we have to do the same thing for the sdma engines */
10154         sdma_update_lmc(dd, mask, lid);
10155 }
10156
10157 static const char *state_completed_string(u32 completed)
10158 {
10159         static const char * const state_completed[] = {
10160                 "EstablishComm",
10161                 "OptimizeEQ",
10162                 "VerifyCap"
10163         };
10164
10165         if (completed < ARRAY_SIZE(state_completed))
10166                 return state_completed[completed];
10167
10168         return "unknown";
10169 }
10170
10171 static const char all_lanes_dead_timeout_expired[] =
10172         "All lanes were inactive - was the interconnect media removed?";
10173 static const char tx_out_of_policy[] =
10174         "Passing lanes on local port do not meet the local link width policy";
10175 static const char no_state_complete[] =
10176         "State timeout occurred before link partner completed the state";
10177 static const char * const state_complete_reasons[] = {
10178         [0x00] = "Reason unknown",
10179         [0x01] = "Link was halted by driver, refer to LinkDownReason",
10180         [0x02] = "Link partner reported failure",
10181         [0x10] = "Unable to achieve frame sync on any lane",
10182         [0x11] =
10183           "Unable to find a common bit rate with the link partner",
10184         [0x12] =
10185           "Unable to achieve frame sync on sufficient lanes to meet the local link width policy",
10186         [0x13] =
10187           "Unable to identify preset equalization on sufficient lanes to meet the local link width policy",
10188         [0x14] = no_state_complete,
10189         [0x15] =
10190           "State timeout occurred before link partner identified equalization presets",
10191         [0x16] =
10192           "Link partner completed the EstablishComm state, but the passing lanes do not meet the local link width policy",
10193         [0x17] = tx_out_of_policy,
10194         [0x20] = all_lanes_dead_timeout_expired,
10195         [0x21] =
10196           "Unable to achieve acceptable BER on sufficient lanes to meet the local link width policy",
10197         [0x22] = no_state_complete,
10198         [0x23] =
10199           "Link partner completed the OptimizeEq state, but the passing lanes do not meet the local link width policy",
10200         [0x24] = tx_out_of_policy,
10201         [0x30] = all_lanes_dead_timeout_expired,
10202         [0x31] =
10203           "State timeout occurred waiting for host to process received frames",
10204         [0x32] = no_state_complete,
10205         [0x33] =
10206           "Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
10207         [0x34] = tx_out_of_policy,
10208         [0x35] = "Negotiated link width is mutually exclusive",
10209         [0x36] =
10210           "Timed out before receiving verifycap frames in VerifyCap.Exchange",
10211         [0x37] = "Unable to resolve secure data exchange",
10212 };
10213
10214 static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
10215                                                      u32 code)
10216 {
10217         const char *str = NULL;
10218
10219         if (code < ARRAY_SIZE(state_complete_reasons))
10220                 str = state_complete_reasons[code];
10221
10222         if (str)
10223                 return str;
10224         return "Reserved";
10225 }
10226
10227 /* describe the given last state complete frame */
10228 static void decode_state_complete(struct hfi1_pportdata *ppd, u32 frame,
10229                                   const char *prefix)
10230 {
10231         struct hfi1_devdata *dd = ppd->dd;
10232         u32 success;
10233         u32 state;
10234         u32 reason;
10235         u32 lanes;
10236
10237         /*
10238          * Decode frame:
10239          *  [ 0: 0] - success
10240          *  [ 3: 1] - state
10241          *  [ 7: 4] - next state timeout
10242          *  [15: 8] - reason code
10243          *  [31:16] - lanes
10244          */
10245         success = frame & 0x1;
10246         state = (frame >> 1) & 0x7;
10247         reason = (frame >> 8) & 0xff;
10248         lanes = (frame >> 16) & 0xffff;
10249
10250         dd_dev_err(dd, "Last %s LNI state complete frame 0x%08x:\n",
10251                    prefix, frame);
10252         dd_dev_err(dd, "    last reported state: %s (0x%x)\n",
10253                    state_completed_string(state), state);
10254         dd_dev_err(dd, "    state successfully completed: %s\n",
10255                    success ? "yes" : "no");
10256         dd_dev_err(dd, "    fail reason 0x%x: %s\n",
10257                    reason, state_complete_reason_code_string(ppd, reason));
10258         dd_dev_err(dd, "    passing lane mask: 0x%x", lanes);
10259 }
10260
10261 /*
10262  * Read the last state complete frames and explain them.  This routine
10263  * expects to be called if the link went down during link negotiation
10264  * and initialization (LNI).  That is, anywhere between polling and link up.
10265  */
10266 static void check_lni_states(struct hfi1_pportdata *ppd)
10267 {
10268         u32 last_local_state;
10269         u32 last_remote_state;
10270
10271         read_last_local_state(ppd->dd, &last_local_state);
10272         read_last_remote_state(ppd->dd, &last_remote_state);
10273
10274         /*
10275          * Don't report anything if there is nothing to report.  A value of
10276          * 0 means the link was taken down while polling and there was no
10277          * training in-process.
10278          */
10279         if (last_local_state == 0 && last_remote_state == 0)
10280                 return;
10281
10282         decode_state_complete(ppd, last_local_state, "transmitted");
10283         decode_state_complete(ppd, last_remote_state, "received");
10284 }
10285
10286 /* wait for wait_ms for LINK_TRANSFER_ACTIVE to go to 1 */
10287 static int wait_link_transfer_active(struct hfi1_devdata *dd, int wait_ms)
10288 {
10289         u64 reg;
10290         unsigned long timeout;
10291
10292         /* watch LCB_STS_LINK_TRANSFER_ACTIVE */
10293         timeout = jiffies + msecs_to_jiffies(wait_ms);
10294         while (1) {
10295                 reg = read_csr(dd, DC_LCB_STS_LINK_TRANSFER_ACTIVE);
10296                 if (reg)
10297                         break;
10298                 if (time_after(jiffies, timeout)) {
10299                         dd_dev_err(dd,
10300                                    "timeout waiting for LINK_TRANSFER_ACTIVE\n");
10301                         return -ETIMEDOUT;
10302                 }
10303                 udelay(2);
10304         }
10305         return 0;
10306 }
10307
10308 /* called when the logical link state is not down as it should be */
10309 static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
10310 {
10311         struct hfi1_devdata *dd = ppd->dd;
10312
10313         /*
10314          * Bring link up in LCB loopback
10315          */
10316         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10317         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK,
10318                   DC_LCB_CFG_IGNORE_LOST_RCLK_EN_SMASK);
10319
10320         write_csr(dd, DC_LCB_CFG_LANE_WIDTH, 0);
10321         write_csr(dd, DC_LCB_CFG_REINIT_AS_SLAVE, 0);
10322         write_csr(dd, DC_LCB_CFG_CNT_FOR_SKIP_STALL, 0x110);
10323         write_csr(dd, DC_LCB_CFG_LOOPBACK, 0x2);
10324
10325         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 0);
10326         (void)read_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET);
10327         udelay(3);
10328         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 1);
10329         write_csr(dd, DC_LCB_CFG_RUN, 1ull << DC_LCB_CFG_RUN_EN_SHIFT);
10330
10331         wait_link_transfer_active(dd, 100);
10332
10333         /*
10334          * Bring the link down again.
10335          */
10336         write_csr(dd, DC_LCB_CFG_TX_FIFOS_RESET, 1);
10337         write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
10338         write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
10339
10340         dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
10341 }
10342
10343 /*
10344  * Helper for set_link_state().  Do not call except from that routine.
10345  * Expects ppd->hls_mutex to be held.
10346  *
10347  * @rem_reason value to be sent to the neighbor
10348  *
10349  * LinkDownReasons only set if transition succeeds.
10350  */
10351 static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10352 {
10353         struct hfi1_devdata *dd = ppd->dd;
10354         u32 previous_state;
10355         int offline_state_ret;
10356         int ret;
10357
10358         update_lcb_cache(dd);
10359
10360         previous_state = ppd->host_link_state;
10361         ppd->host_link_state = HLS_GOING_OFFLINE;
10362
10363         /* start offline transition */
10364         ret = set_physical_link_state(dd, (rem_reason << 8) | PLS_OFFLINE);
10365
10366         if (ret != HCMD_SUCCESS) {
10367                 dd_dev_err(dd,
10368                            "Failed to transition to Offline link state, return %d\n",
10369                            ret);
10370                 return -EINVAL;
10371         }
10372         if (ppd->offline_disabled_reason ==
10373                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE))
10374                 ppd->offline_disabled_reason =
10375                 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10376
10377         offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10378         if (offline_state_ret < 0)
10379                 return offline_state_ret;
10380
10381         /* Disabling AOC transmitters */
10382         if (ppd->port_type == PORT_TYPE_QSFP &&
10383             ppd->qsfp_info.limiting_active &&
10384             qsfp_mod_present(ppd)) {
10385                 int ret;
10386
10387                 ret = acquire_chip_resource(dd, qsfp_resource(dd), QSFP_WAIT);
10388                 if (ret == 0) {
10389                         set_qsfp_tx(ppd, 0);
10390                         release_chip_resource(dd, qsfp_resource(dd));
10391                 } else {
10392                         /* not fatal, but should warn */
10393                         dd_dev_err(dd,
10394                                    "Unable to acquire lock to turn off QSFP TX\n");
10395                 }
10396         }
10397
10398         /*
10399          * Wait for the offline.Quiet transition if it hasn't happened yet. It
10400          * can take a while for the link to go down.
10401          */
10402         if (offline_state_ret != PLS_OFFLINE_QUIET) {
10403                 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10404                 if (ret < 0)
10405                         return ret;
10406         }
10407
10408         /*
10409          * Now in charge of LCB - must be after the physical state is
10410          * offline.quiet and before host_link_state is changed.
10411          */
10412         set_host_lcb_access(dd);
10413         write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10414
10415         /* make sure the logical state is also down */
10416         ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10417         if (ret)
10418                 force_logical_link_state_down(ppd);
10419
10420         ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10421         update_statusp(ppd, IB_PORT_DOWN);
10422
10423         /*
10424          * The LNI has a mandatory wait time after the physical state
10425          * moves to Offline.Quiet.  The wait time may be different
10426          * depending on how the link went down.  The 8051 firmware
10427          * will observe the needed wait time and only move to ready
10428          * when that is completed.  The largest of the quiet timeouts
10429          * is 6s, so wait that long and then at least 0.5s more for
10430          * other transitions, and another 0.5s for a buffer.
10431          */
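        /* 6000 ms quiet + 500 ms transitions + 500 ms buffer = 7000 ms */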
10432         ret = wait_fm_ready(dd, 7000);
10433         if (ret) {
10434                 dd_dev_err(dd,
10435                            "After going offline, timed out waiting for the 8051 to become ready to accept host requests\n");
10436                 /* state is really offline, so make it so */
10437                 ppd->host_link_state = HLS_DN_OFFLINE;
10438                 return ret;
10439         }
10440
10441         /*
10442          * The state is now offline and the 8051 is ready to accept host
10443          * requests.
10444          *      - change our state
10445          *      - notify others if we were previously in a linkup state
10446          */
10447         ppd->host_link_state = HLS_DN_OFFLINE;
10448         if (previous_state & HLS_UP) {
10449                 /* went down while link was up */
10450                 handle_linkup_change(dd, 0);
10451         } else if (previous_state
10452                         & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10453                 /* went down while attempting link up */
10454                 check_lni_states(ppd);
10455
10456                 /* The QSFP doesn't need to be reset on LNI failure */
10457                 ppd->qsfp_info.reset_needed = 0;
10458         }
10459
10460         /* the active link width (downgrade) is 0 on link down */
10461         ppd->link_width_active = 0;
10462         ppd->link_width_downgrade_tx_active = 0;
10463         ppd->link_width_downgrade_rx_active = 0;
10464         ppd->current_egress_rate = 0;
10465         return 0;
10466 }
10467
10468 /* return the link state name */
10469 static const char *link_state_name(u32 state)
10470 {
10471         const char *name;
10472         int n = ilog2(state);
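        /*
         * Illustrative note: the HLS_* values are single-bit masks, so
         * ilog2() yields the __HLS_*_BP bit position used to index the
         * name table below.
         */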
10473         static const char * const names[] = {
10474                 [__HLS_UP_INIT_BP]       = "INIT",
10475                 [__HLS_UP_ARMED_BP]      = "ARMED",
10476                 [__HLS_UP_ACTIVE_BP]     = "ACTIVE",
10477                 [__HLS_DN_DOWNDEF_BP]    = "DOWNDEF",
10478                 [__HLS_DN_POLL_BP]       = "POLL",
10479                 [__HLS_DN_DISABLE_BP]    = "DISABLE",
10480                 [__HLS_DN_OFFLINE_BP]    = "OFFLINE",
10481                 [__HLS_VERIFY_CAP_BP]    = "VERIFY_CAP",
10482                 [__HLS_GOING_UP_BP]      = "GOING_UP",
10483                 [__HLS_GOING_OFFLINE_BP] = "GOING_OFFLINE",
10484                 [__HLS_LINK_COOLDOWN_BP] = "LINK_COOLDOWN"
10485         };
10486
10487         name = n < ARRAY_SIZE(names) ? names[n] : NULL;
10488         return name ? name : "unknown";
10489 }
10490
10491 /* return the link state reason name */
10492 static const char *link_state_reason_name(struct hfi1_pportdata *ppd, u32 state)
10493 {
10494         if (state == HLS_UP_INIT) {
10495                 switch (ppd->linkinit_reason) {
10496                 case OPA_LINKINIT_REASON_LINKUP:
10497                         return "(LINKUP)";
10498                 case OPA_LINKINIT_REASON_FLAPPING:
10499                         return "(FLAPPING)";
10500                 case OPA_LINKINIT_OUTSIDE_POLICY:
10501                         return "(OUTSIDE_POLICY)";
10502                 case OPA_LINKINIT_QUARANTINED:
10503                         return "(QUARANTINED)";
10504                 case OPA_LINKINIT_INSUFIC_CAPABILITY:
10505                         return "(INSUFIC_CAPABILITY)";
10506                 default:
10507                         break;
10508                 }
10509         }
10510         return "";
10511 }
10512
10513 /*
10514  * driver_pstate - convert the driver's notion of a port's
10515  * state (an HLS_*) into a physical state (a {IB,OPA}_PORTPHYSSTATE_*).
10516  * Return -1 (converted to a u32) to indicate error.
10517  */
10518 u32 driver_pstate(struct hfi1_pportdata *ppd)
10519 {
10520         switch (ppd->host_link_state) {
10521         case HLS_UP_INIT:
10522         case HLS_UP_ARMED:
10523         case HLS_UP_ACTIVE:
10524                 return IB_PORTPHYSSTATE_LINKUP;
10525         case HLS_DN_POLL:
10526                 return IB_PORTPHYSSTATE_POLLING;
10527         case HLS_DN_DISABLE:
10528                 return IB_PORTPHYSSTATE_DISABLED;
10529         case HLS_DN_OFFLINE:
10530                 return OPA_PORTPHYSSTATE_OFFLINE;
10531         case HLS_VERIFY_CAP:
10532                 return IB_PORTPHYSSTATE_TRAINING;
10533         case HLS_GOING_UP:
10534                 return IB_PORTPHYSSTATE_TRAINING;
10535         case HLS_GOING_OFFLINE:
10536                 return OPA_PORTPHYSSTATE_OFFLINE;
10537         case HLS_LINK_COOLDOWN:
10538                 return OPA_PORTPHYSSTATE_OFFLINE;
10539         case HLS_DN_DOWNDEF:
10540         default:
10541                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10542                            ppd->host_link_state);
10543                 return -1;
10544         }
10545 }
10546
10547 /*
10548  * driver_lstate - convert the driver's notion of a port's
10549  * state (an HLS_*) into a logical state (an IB_PORT_*). Return -1
10550  * (converted to a u32) to indicate error.
10551  */
10552 u32 driver_lstate(struct hfi1_pportdata *ppd)
10553 {
10554         if (ppd->host_link_state && (ppd->host_link_state & HLS_DOWN))
10555                 return IB_PORT_DOWN;
10556
10557         switch (ppd->host_link_state & HLS_UP) {
10558         case HLS_UP_INIT:
10559                 return IB_PORT_INIT;
10560         case HLS_UP_ARMED:
10561                 return IB_PORT_ARMED;
10562         case HLS_UP_ACTIVE:
10563                 return IB_PORT_ACTIVE;
10564         default:
10565                 dd_dev_err(ppd->dd, "invalid host_link_state 0x%x\n",
10566                            ppd->host_link_state);
10567                 return -1;
10568         }
10569 }
10570
10571 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
10572                           u8 neigh_reason, u8 rem_reason)
10573 {
10574         if (ppd->local_link_down_reason.latest == 0 &&
10575             ppd->neigh_link_down_reason.latest == 0) {
10576                 ppd->local_link_down_reason.latest = lcl_reason;
10577                 ppd->neigh_link_down_reason.latest = neigh_reason;
10578                 ppd->remote_link_down_reason = rem_reason;
10579         }
10580 }
10581
10582 /**
10583  * data_vls_operational() - Verify if data VL BCT credits and MTU
10584  *                          are both set.
10585  * @ppd: pointer to hfi1_pportdata structure
10586  *
10587  * Return: true - OK, false - otherwise.
10588  */
10589 static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
10590 {
10591         int i;
10592         u64 reg;
10593
10594         if (!ppd->actual_vls_operational)
10595                 return false;
10596
10597         for (i = 0; i < ppd->vls_supported; i++) {
10598                 reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
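                /*
                 * Credits and MTU must agree for each VL: one being set
                 * while the other is not means the data VLs are not yet
                 * operational.
                 */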
10599                 if ((reg && !ppd->dd->vld[i].mtu) ||
10600                     (!reg && ppd->dd->vld[i].mtu))
10601                         return false;
10602         }
10603
10604         return true;
10605 }
10606
10607 /*
10608  * Change the physical and/or logical link state.
10609  *
10610  * Do not call this routine while inside an interrupt.  It contains
10611  * calls to routines that can take multiple seconds to finish.
10612  *
10613  * Returns 0 on success, -errno on failure.
10614  */
10615 int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10616 {
10617         struct hfi1_devdata *dd = ppd->dd;
10618         struct ib_event event = {.device = NULL};
10619         int ret1, ret = 0;
10620         int orig_new_state, poll_bounce;
10621
10622         mutex_lock(&ppd->hls_lock);
10623
10624         orig_new_state = state;
10625         if (state == HLS_DN_DOWNDEF)
10626                 state = HLS_DEFAULT;
10627
10628         /* interpret poll -> poll as a link bounce */
10629         poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
10630                       state == HLS_DN_POLL;
10631
10632         dd_dev_info(dd, "%s: current %s, new %s %s%s\n", __func__,
10633                     link_state_name(ppd->host_link_state),
10634                     link_state_name(orig_new_state),
10635                     poll_bounce ? "(bounce) " : "",
10636                     link_state_reason_name(ppd, state));
10637
10638         /*
10639          * If we're going to a (HLS_*) link state that implies the logical
10640          * link state is neither of (IB_PORT_ARMED, IB_PORT_ACTIVE), then
10641          * reset is_sm_config_started to 0.
10642          */
10643         if (!(state & (HLS_UP_ARMED | HLS_UP_ACTIVE)))
10644                 ppd->is_sm_config_started = 0;
10645
10646         /*
10647          * Do nothing if the states match.  Let a poll to poll link bounce
10648          * go through.
10649          */
10650         if (ppd->host_link_state == state && !poll_bounce)
10651                 goto done;
10652
10653         switch (state) {
10654         case HLS_UP_INIT:
10655                 if (ppd->host_link_state == HLS_DN_POLL &&
10656                     (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) {
10657                         /*
10658                          * Quick link up jumps from polling to here.
10659                          *
10660                          * Whether in normal or loopback mode, the
10661                          * simulator jumps from polling to link up.
10662                          * Accept that here.
10663                          */
10664                         /* OK */
10665                 } else if (ppd->host_link_state != HLS_GOING_UP) {
10666                         goto unexpected;
10667                 }
10668
10669                 /*
10670                  * Wait for Link_Up physical state.
10671                  * Physical and Logical states should already be
10672                  * transitioned to LinkUp and LinkInit respectively.
10673                  */
10674                 ret = wait_physical_linkstate(ppd, PLS_LINKUP, 1000);
10675                 if (ret) {
10676                         dd_dev_err(dd,
10677                                    "%s: physical state did not change to LINK-UP\n",
10678                                    __func__);
10679                         break;
10680                 }
10681
10682                 ret = wait_logical_linkstate(ppd, IB_PORT_INIT, 1000);
10683                 if (ret) {
10684                         dd_dev_err(dd,
10685                                    "%s: logical state did not change to INIT\n",
10686                                    __func__);
10687                         break;
10688                 }
10689
10690                 /* clear old transient LINKINIT_REASON code */
10691                 if (ppd->linkinit_reason >= OPA_LINKINIT_REASON_CLEAR)
10692                         ppd->linkinit_reason =
10693                                 OPA_LINKINIT_REASON_LINKUP;
10694
10695                 /* enable the port */
10696                 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10697
10698                 handle_linkup_change(dd, 1);
10699                 pio_kernel_linkup(dd);
10700
10701                 /*
10702                  * After link up, a new link width will have been set.
10703                  * Update the xmit counters with regard to the new
10704                  * link width.
10705                  */
10706                 update_xmit_counters(ppd, ppd->link_width_active);
10707
10708                 ppd->host_link_state = HLS_UP_INIT;
10709                 update_statusp(ppd, IB_PORT_INIT);
10710                 break;
10711         case HLS_UP_ARMED:
10712                 if (ppd->host_link_state != HLS_UP_INIT)
10713                         goto unexpected;
10714
10715                 if (!data_vls_operational(ppd)) {
10716                         dd_dev_err(dd,
10717                                    "%s: Invalid data VL credits or mtu\n",
10718                                    __func__);
10719                         ret = -EINVAL;
10720                         break;
10721                 }
10722
10723                 set_logical_state(dd, LSTATE_ARMED);
10724                 ret = wait_logical_linkstate(ppd, IB_PORT_ARMED, 1000);
10725                 if (ret) {
10726                         dd_dev_err(dd,
10727                                    "%s: logical state did not change to ARMED\n",
10728                                    __func__);
10729                         break;
10730                 }
10731                 ppd->host_link_state = HLS_UP_ARMED;
10732                 update_statusp(ppd, IB_PORT_ARMED);
10733                 /*
10734                  * The simulator does not currently implement SMA messages,
10735                  * so neighbor_normal is not set.  Set it here when we first
10736                  * move to Armed.
10737                  */
10738                 if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
10739                         ppd->neighbor_normal = 1;
10740                 break;
10741         case HLS_UP_ACTIVE:
10742                 if (ppd->host_link_state != HLS_UP_ARMED)
10743                         goto unexpected;
10744
10745                 set_logical_state(dd, LSTATE_ACTIVE);
10746                 ret = wait_logical_linkstate(ppd, IB_PORT_ACTIVE, 1000);
10747                 if (ret) {
10748                         dd_dev_err(dd,
10749                                    "%s: logical state did not change to ACTIVE\n",
10750                                    __func__);
10751                 } else {
10752                         /* tell all engines to go running */
10753                         sdma_all_running(dd);
10754                         ppd->host_link_state = HLS_UP_ACTIVE;
10755                         update_statusp(ppd, IB_PORT_ACTIVE);
10756
10757                         /* Signal the IB layer that the port has gone active */
10758                         event.device = &dd->verbs_dev.rdi.ibdev;
10759                         event.element.port_num = ppd->port;
10760                         event.event = IB_EVENT_PORT_ACTIVE;
10761                 }
10762                 break;
10763         case HLS_DN_POLL:
10764                 if ((ppd->host_link_state == HLS_DN_DISABLE ||
10765                      ppd->host_link_state == HLS_DN_OFFLINE) &&
10766                     dd->dc_shutdown)
10767                         dc_start(dd);
10768                 /* Hand LED control to the DC */
10769                 write_csr(dd, DCC_CFG_LED_CNTRL, 0);
10770
10771                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10772                         u8 tmp = ppd->link_enabled;
10773
10774                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10775                         if (ret) {
10776                                 ppd->link_enabled = tmp;
10777                                 break;
10778                         }
10779                         ppd->remote_link_down_reason = 0;
10780
10781                         if (ppd->driver_link_ready)
10782                                 ppd->link_enabled = 1;
10783                 }
10784
10785                 set_all_slowpath(ppd->dd);
10786                 ret = set_local_link_attributes(ppd);
10787                 if (ret)
10788                         break;
10789
10790                 ppd->port_error_action = 0;
10791
10792                 if (quick_linkup) {
10793                         /* quick linkup does not go into polling */
10794                         ret = do_quick_linkup(dd);
10795                 } else {
10796                         ret1 = set_physical_link_state(dd, PLS_POLLING);
10797                         if (!ret1)
10798                                 ret1 = wait_phys_link_out_of_offline(ppd,
10799                                                                      3000);
10800                         if (ret1 != HCMD_SUCCESS) {
10801                                 dd_dev_err(dd,
10802                                            "Failed to transition to Polling link state, return 0x%x\n",
10803                                            ret1);
10804                                 ret = -EINVAL;
10805                         }
10806                 }
10807
10808                 /*
10809                  * Change the host link state after requesting DC8051 to
10810                  * change its physical state so that we can ignore any
10811                  * interrupt with stale LNI(XX) error, which will not be
10812                  * cleared until DC8051 transitions to Polling state.
10813                  */
10814                 ppd->host_link_state = HLS_DN_POLL;
10815                 ppd->offline_disabled_reason =
10816                         HFI1_ODR_MASK(OPA_LINKDOWN_REASON_NONE);
10817                 /*
10818                  * If an error occurred above, go back to offline.  The
10819                  * caller may reschedule another attempt.
10820                  */
10821                 if (ret)
10822                         goto_offline(ppd, 0);
10823                 else
10824                         log_physical_state(ppd, PLS_POLLING);
10825                 break;
10826         case HLS_DN_DISABLE:
10827                 /* link is disabled */
10828                 ppd->link_enabled = 0;
10829
10830                 /* allow any state to transition to disabled */
10831
10832                 /* must transition to offline first */
10833                 if (ppd->host_link_state != HLS_DN_OFFLINE) {
10834                         ret = goto_offline(ppd, ppd->remote_link_down_reason);
10835                         if (ret)
10836                                 break;
10837                         ppd->remote_link_down_reason = 0;
10838                 }
10839
10840                 if (!dd->dc_shutdown) {
10841                         ret1 = set_physical_link_state(dd, PLS_DISABLED);
10842                         if (ret1 != HCMD_SUCCESS) {
10843                                 dd_dev_err(dd,
10844                                            "Failed to transition to Disabled link state, return 0x%x\n",
10845                                            ret1);
10846                                 ret = -EINVAL;
10847                                 break;
10848                         }
10849                         ret = wait_physical_linkstate(ppd, PLS_DISABLED, 10000);
10850                         if (ret) {
10851                                 dd_dev_err(dd,
10852                                            "%s: physical state did not change to DISABLED\n",
10853                                            __func__);
10854                                 break;
10855                         }
10856                         dc_shutdown(dd);
10857                 }
10858                 ppd->host_link_state = HLS_DN_DISABLE;
10859                 break;
10860         case HLS_DN_OFFLINE:
10861                 if (ppd->host_link_state == HLS_DN_DISABLE)
10862                         dc_start(dd);
10863
10864                 /* allow any state to transition to offline */
10865                 ret = goto_offline(ppd, ppd->remote_link_down_reason);
10866                 if (!ret)
10867                         ppd->remote_link_down_reason = 0;
10868                 break;
10869         case HLS_VERIFY_CAP:
10870                 if (ppd->host_link_state != HLS_DN_POLL)
10871                         goto unexpected;
10872                 ppd->host_link_state = HLS_VERIFY_CAP;
10873                 log_physical_state(ppd, PLS_CONFIGPHY_VERIFYCAP);
10874                 break;
10875         case HLS_GOING_UP:
10876                 if (ppd->host_link_state != HLS_VERIFY_CAP)
10877                         goto unexpected;
10878
10879                 ret1 = set_physical_link_state(dd, PLS_LINKUP);
10880                 if (ret1 != HCMD_SUCCESS) {
10881                         dd_dev_err(dd,
10882                                    "Failed to transition to link up state, return 0x%x\n",
10883                                    ret1);
10884                         ret = -EINVAL;
10885                         break;
10886                 }
10887                 ppd->host_link_state = HLS_GOING_UP;
10888                 break;
10889
10890         case HLS_GOING_OFFLINE:         /* transient within goto_offline() */
10891         case HLS_LINK_COOLDOWN:         /* transient within goto_offline() */
10892         default:
10893                 dd_dev_info(dd, "%s: state 0x%x: not supported\n",
10894                             __func__, state);
10895                 ret = -EINVAL;
10896                 break;
10897         }
10898
10899         goto done;
10900
10901 unexpected:
10902         dd_dev_err(dd, "%s: unexpected state transition from %s to %s\n",
10903                    __func__, link_state_name(ppd->host_link_state),
10904                    link_state_name(state));
10905         ret = -EINVAL;
10906
10907 done:
10908         mutex_unlock(&ppd->hls_lock);
10909
10910         if (event.device)
10911                 ib_dispatch_event(&event);
10912
10913         return ret;
10914 }
10915
10916 int hfi1_set_ib_cfg(struct hfi1_pportdata *ppd, int which, u32 val)
10917 {
10918         u64 reg;
10919         int ret = 0;
10920
10921         switch (which) {
10922         case HFI1_IB_CFG_LIDLMC:
10923                 set_lidlmc(ppd);
10924                 break;
10925         case HFI1_IB_CFG_VL_HIGH_LIMIT:
10926                 /*
10927                  * The VL Arbitrator high limit is sent in units of 4k
10928                  * bytes, while HFI stores it in units of 64 bytes.
10929                  */
10930                 val *= 4096 / 64;
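                /* e.g. a limit of 3 (3 * 4096 = 12288 bytes) becomes 3 * 64 = 192 */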
10931                 reg = ((u64)val & SEND_HIGH_PRIORITY_LIMIT_LIMIT_MASK)
10932                         << SEND_HIGH_PRIORITY_LIMIT_LIMIT_SHIFT;
10933                 write_csr(ppd->dd, SEND_HIGH_PRIORITY_LIMIT, reg);
10934                 break;
10935         case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
10936                 /* HFI only supports POLL as the default link down state */
10937                 if (val != HLS_DN_POLL)
10938                         ret = -EINVAL;
10939                 break;
10940         case HFI1_IB_CFG_OP_VLS:
10941                 if (ppd->vls_operational != val) {
10942                         ppd->vls_operational = val;
10943                         if (!ppd->port)
10944                                 ret = -EINVAL;
10945                 }
10946                 break;
10947         /*
10948          * For link width, link width downgrade, and speed enable, always AND
10949          * the setting with what is actually supported.  This has two benefits.
10950          * First, enabled can't have unsupported values, no matter what the
10951          * SM or FM might want.  Second, the ALL_SUPPORTED wildcards that mean
10952          * "fill in with your supported value" have all the bits in the
10953          * field set, so simply ANDing with supported has the desired result.
10954          */
10955         case HFI1_IB_CFG_LWID_ENB: /* set allowed Link-width */
10956                 ppd->link_width_enabled = val & ppd->link_width_supported;
10957                 break;
10958         case HFI1_IB_CFG_LWID_DG_ENB: /* set allowed link width downgrade */
10959                 ppd->link_width_downgrade_enabled =
10960                                 val & ppd->link_width_downgrade_supported;
10961                 break;
10962         case HFI1_IB_CFG_SPD_ENB: /* allowed Link speeds */
10963                 ppd->link_speed_enabled = val & ppd->link_speed_supported;
10964                 break;
10965         case HFI1_IB_CFG_OVERRUN_THRESH: /* IB overrun threshold */
10966                 /*
10967                  * HFI does not follow IB specs, save this value
10968                  * so we can report it, if asked.
10969                  */
10970                 ppd->overrun_threshold = val;
10971                 break;
10972         case HFI1_IB_CFG_PHYERR_THRESH: /* IB PHY error threshold */
10973                 /*
10974                  * HFI does not follow IB specs, save this value
10975                  * so we can report it, if asked.
10976                  */
10977                 ppd->phy_error_threshold = val;
10978                 break;
10979
10980         case HFI1_IB_CFG_MTU:
10981                 set_send_length(ppd);
10982                 break;
10983
10984         case HFI1_IB_CFG_PKEYS:
10985                 if (HFI1_CAP_IS_KSET(PKEY_CHECK))
10986                         set_partition_keys(ppd);
10987                 break;
10988
10989         default:
10990                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
10991                         dd_dev_info(ppd->dd,
10992                                     "%s: which %s, val 0x%x: not implemented\n",
10993                                     __func__, ib_cfg_name(which), val);
10994                 break;
10995         }
10996         return ret;
10997 }
10998
10999 /* begin functions related to vl arbitration table caching */
11000 static void init_vl_arb_caches(struct hfi1_pportdata *ppd)
11001 {
11002         int i;
11003
11004         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11005                         VL_ARB_LOW_PRIO_TABLE_SIZE);
11006         BUILD_BUG_ON(VL_ARB_TABLE_SIZE !=
11007                         VL_ARB_HIGH_PRIO_TABLE_SIZE);
11008
11009         /*
11010          * Note that we always return values directly from the
11011          * 'vl_arb_cache' (and do no CSR reads) in response to a
11012          * 'Get(VLArbTable)'. This is obviously correct after a
11013          * 'Set(VLArbTable)', since the cache will then be up to
11014          * date. But it's also correct prior to any 'Set(VLArbTable)'
11015          * since then both the cache, and the relevant h/w registers
11016          * will be zeroed.
11017          */
11018
11019         for (i = 0; i < MAX_PRIO_TABLE; i++)
11020                 spin_lock_init(&ppd->vl_arb_cache[i].lock);
11021 }
11022
11023 /*
11024  * vl_arb_lock_cache
11025  *
11026  * All other vl_arb_* functions should be called only after locking
11027  * the cache.
11028  */
11029 static inline struct vl_arb_cache *
11030 vl_arb_lock_cache(struct hfi1_pportdata *ppd, int idx)
11031 {
11032         if (idx != LO_PRIO_TABLE && idx != HI_PRIO_TABLE)
11033                 return NULL;
11034         spin_lock(&ppd->vl_arb_cache[idx].lock);
11035         return &ppd->vl_arb_cache[idx];
11036 }
11037
11038 static inline void vl_arb_unlock_cache(struct hfi1_pportdata *ppd, int idx)
11039 {
11040         spin_unlock(&ppd->vl_arb_cache[idx].lock);
11041 }
11042
11043 static void vl_arb_get_cache(struct vl_arb_cache *cache,
11044                              struct ib_vl_weight_elem *vl)
11045 {
11046         memcpy(vl, cache->table, VL_ARB_TABLE_SIZE * sizeof(*vl));
11047 }
11048
11049 static void vl_arb_set_cache(struct vl_arb_cache *cache,
11050                              struct ib_vl_weight_elem *vl)
11051 {
11052         memcpy(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11053 }
11054
11055 static int vl_arb_match_cache(struct vl_arb_cache *cache,
11056                               struct ib_vl_weight_elem *vl)
11057 {
11058         return !memcmp(cache->table, vl, VL_ARB_TABLE_SIZE * sizeof(*vl));
11059 }
11060
11061 /* end functions related to vl arbitration table caching */
11062
11063 static int set_vl_weights(struct hfi1_pportdata *ppd, u32 target,
11064                           u32 size, struct ib_vl_weight_elem *vl)
11065 {
11066         struct hfi1_devdata *dd = ppd->dd;
11067         u64 reg;
11068         unsigned int i, is_up = 0;
11069         int drain, ret = 0;
11070
11071         mutex_lock(&ppd->hls_lock);
11072
11073         if (ppd->host_link_state & HLS_UP)
11074                 is_up = 1;
11075
11076         drain = !is_ax(dd) && is_up;
11077
11078         if (drain)
11079                 /*
11080                  * Before adjusting VL arbitration weights, empty per-VL
11081                  * FIFOs, otherwise a packet whose VL weight is being
11082                  * set to 0 could get stuck in a FIFO with no chance to
11083                  * egress.
11084                  */
11085                 ret = stop_drain_data_vls(dd);
11086
11087         if (ret) {
11088                 dd_dev_err(
11089                         dd,
11090                         "%s: cannot stop/drain VLs - refusing to change VL arbitration weights\n",
11091                         __func__);
11092                 goto err;
11093         }
11094
11095         for (i = 0; i < size; i++, vl++) {
11096                 /*
11097                  * NOTE: The low priority shift and mask are used here, but
11098                  * they are the same for both the low and high registers.
11099                  */
11100                 reg = (((u64)vl->vl & SEND_LOW_PRIORITY_LIST_VL_MASK)
11101                                 << SEND_LOW_PRIORITY_LIST_VL_SHIFT)
11102                       | (((u64)vl->weight
11103                                 & SEND_LOW_PRIORITY_LIST_WEIGHT_MASK)
11104                                 << SEND_LOW_PRIORITY_LIST_WEIGHT_SHIFT);
11105                 write_csr(dd, target + (i * 8), reg);
11106         }
11107         pio_send_control(dd, PSC_GLOBAL_VLARB_ENABLE);
11108
11109         if (drain)
11110                 open_fill_data_vls(dd); /* reopen all VLs */
11111
11112 err:
11113         mutex_unlock(&ppd->hls_lock);
11114
11115         return ret;
11116 }
11117
11118 /*
11119  * Read one credit merge VL register.
11120  */
11121 static void read_one_cm_vl(struct hfi1_devdata *dd, u32 csr,
11122                            struct vl_limit *vll)
11123 {
11124         u64 reg = read_csr(dd, csr);
11125
11126         vll->dedicated = cpu_to_be16(
11127                 (reg >> SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT)
11128                 & SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_MASK);
11129         vll->shared = cpu_to_be16(
11130                 (reg >> SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT)
11131                 & SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_MASK);
11132 }
11133
11134 /*
11135  * Read the current credit merge limits.
11136  */
11137 static int get_buffer_control(struct hfi1_devdata *dd,
11138                               struct buffer_control *bc, u16 *overall_limit)
11139 {
11140         u64 reg;
11141         int i;
11142
11143         /* not all entries are filled in */
11144         memset(bc, 0, sizeof(*bc));
11145
11146         /* OPA and HFI have a 1-1 mapping */
11147         for (i = 0; i < TXE_NUM_DATA_VL; i++)
11148                 read_one_cm_vl(dd, SEND_CM_CREDIT_VL + (8 * i), &bc->vl[i]);
11149
11150         /* NOTE: assumes that VL* and VL15 CSRs are bit-wise identical */
11151         read_one_cm_vl(dd, SEND_CM_CREDIT_VL15, &bc->vl[15]);
11152
11153         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11154         bc->overall_shared_limit = cpu_to_be16(
11155                 (reg >> SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT)
11156                 & SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK);
11157         if (overall_limit)
11158                 *overall_limit = (reg
11159                         >> SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT)
11160                         & SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_MASK;
11161         return sizeof(struct buffer_control);
11162 }
11163
11164 static int get_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11165 {
11166         u64 reg;
11167         int i;
11168
11169         /* each register contains 16 SC->VLnt mappings, 4 bits each */
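        /* e.g. a byte of 0x21 unpacks to VLnt 0x1 (low nibble) then 0x2 (high nibble) */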
11170         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_15_0);
11171         for (i = 0; i < sizeof(u64); i++) {
11172                 u8 byte = *(((u8 *)&reg) + i);
11173
11174                 dp->vlnt[2 * i] = byte & 0xf;
11175                 dp->vlnt[(2 * i) + 1] = (byte & 0xf0) >> 4;
11176         }
11177
11178         reg = read_csr(dd, DCC_CFG_SC_VL_TABLE_31_16);
11179         for (i = 0; i < sizeof(u64); i++) {
11180                 u8 byte = *(((u8 *)&reg) + i);
11181
11182                 dp->vlnt[16 + (2 * i)] = byte & 0xf;
11183                 dp->vlnt[16 + (2 * i) + 1] = (byte & 0xf0) >> 4;
11184         }
11185         return sizeof(struct sc2vlnt);
11186 }
11187
11188 static void get_vlarb_preempt(struct hfi1_devdata *dd, u32 nelems,
11189                               struct ib_vl_weight_elem *vl)
11190 {
11191         unsigned int i;
11192
11193         for (i = 0; i < nelems; i++, vl++) {
11194                 vl->vl = 0xf;
11195                 vl->weight = 0;
11196         }
11197 }
11198
11199 static void set_sc2vlnt(struct hfi1_devdata *dd, struct sc2vlnt *dp)
11200 {
11201         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0,
11202                   DC_SC_VL_VAL(15_0,
11203                                0, dp->vlnt[0] & 0xf,
11204                                1, dp->vlnt[1] & 0xf,
11205                                2, dp->vlnt[2] & 0xf,
11206                                3, dp->vlnt[3] & 0xf,
11207                                4, dp->vlnt[4] & 0xf,
11208                                5, dp->vlnt[5] & 0xf,
11209                                6, dp->vlnt[6] & 0xf,
11210                                7, dp->vlnt[7] & 0xf,
11211                                8, dp->vlnt[8] & 0xf,
11212                                9, dp->vlnt[9] & 0xf,
11213                                10, dp->vlnt[10] & 0xf,
11214                                11, dp->vlnt[11] & 0xf,
11215                                12, dp->vlnt[12] & 0xf,
11216                                13, dp->vlnt[13] & 0xf,
11217                                14, dp->vlnt[14] & 0xf,
11218                                15, dp->vlnt[15] & 0xf));
11219         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16,
11220                   DC_SC_VL_VAL(31_16,
11221                                16, dp->vlnt[16] & 0xf,
11222                                17, dp->vlnt[17] & 0xf,
11223                                18, dp->vlnt[18] & 0xf,
11224                                19, dp->vlnt[19] & 0xf,
11225                                20, dp->vlnt[20] & 0xf,
11226                                21, dp->vlnt[21] & 0xf,
11227                                22, dp->vlnt[22] & 0xf,
11228                                23, dp->vlnt[23] & 0xf,
11229                                24, dp->vlnt[24] & 0xf,
11230                                25, dp->vlnt[25] & 0xf,
11231                                26, dp->vlnt[26] & 0xf,
11232                                27, dp->vlnt[27] & 0xf,
11233                                28, dp->vlnt[28] & 0xf,
11234                                29, dp->vlnt[29] & 0xf,
11235                                30, dp->vlnt[30] & 0xf,
11236                                31, dp->vlnt[31] & 0xf));
11237 }
11238
11239 static void nonzero_msg(struct hfi1_devdata *dd, int idx, const char *what,
11240                         u16 limit)
11241 {
11242         if (limit != 0)
11243                 dd_dev_info(dd, "Invalid %s limit %d on VL %d, ignoring\n",
11244                             what, (int)limit, idx);
11245 }
11246
11247 /* change only the shared limit portion of SendCmGlobalCredit */
11248 static void set_global_shared(struct hfi1_devdata *dd, u16 limit)
11249 {
11250         u64 reg;
11251
11252         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11253         reg &= ~SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK;
11254         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT;
11255         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11256 }
11257
11258 /* change only the total credit limit portion of SendCmGlobalCredit */
11259 static void set_global_limit(struct hfi1_devdata *dd, u16 limit)
11260 {
11261         u64 reg;
11262
11263         reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
11264         reg &= ~SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK;
11265         reg |= (u64)limit << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
11266         write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
11267 }
11268
11269 /* set the given per-VL shared limit */
11270 static void set_vl_shared(struct hfi1_devdata *dd, int vl, u16 limit)
11271 {
11272         u64 reg;
11273         u32 addr;
11274
11275         if (vl < TXE_NUM_DATA_VL)
11276                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11277         else
11278                 addr = SEND_CM_CREDIT_VL15;
11279
11280         reg = read_csr(dd, addr);
11281         reg &= ~SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SMASK;
11282         reg |= (u64)limit << SEND_CM_CREDIT_VL_SHARED_LIMIT_VL_SHIFT;
11283         write_csr(dd, addr, reg);
11284 }
11285
11286 /* set the given per-VL dedicated limit */
11287 static void set_vl_dedicated(struct hfi1_devdata *dd, int vl, u16 limit)
11288 {
11289         u64 reg;
11290         u32 addr;
11291
11292         if (vl < TXE_NUM_DATA_VL)
11293                 addr = SEND_CM_CREDIT_VL + (8 * vl);
11294         else
11295                 addr = SEND_CM_CREDIT_VL15;
11296
11297         reg = read_csr(dd, addr);
11298         reg &= ~SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SMASK;
11299         reg |= (u64)limit << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT;
11300         write_csr(dd, addr, reg);
11301 }
11302
11303 /* spin until the given per-VL status mask bits clear */
11304 static void wait_for_vl_status_clear(struct hfi1_devdata *dd, u64 mask,
11305                                      const char *which)
11306 {
11307         unsigned long timeout;
11308         u64 reg;
11309
11310         timeout = jiffies + msecs_to_jiffies(VL_STATUS_CLEAR_TIMEOUT);
11311         while (1) {
11312                 reg = read_csr(dd, SEND_CM_CREDIT_USED_STATUS) & mask;
11313
11314                 if (reg == 0)
11315                         return; /* success */
11316                 if (time_after(jiffies, timeout))
11317                         break;          /* timed out */
11318                 udelay(1);
11319         }
11320
11321         dd_dev_err(dd,
11322                    "%s credit change status not clearing after %dms, mask 0x%llx, not clear 0x%llx\n",
11323                    which, VL_STATUS_CLEAR_TIMEOUT, mask, reg);
11324         /*
11325          * If this occurs, it is likely there was a credit loss on the link.
11326          * The only recovery from that is a link bounce.
11327          */
11328         dd_dev_err(dd,
11329                    "Continuing anyway.  A credit loss may occur.  Suggest a link bounce\n");
11330 }
11331
11332 /*
11333  * The number of credits on the VLs may be changed while everything
11334  * is "live", but the following algorithm must be followed due to
11335  * how the hardware is actually implemented.  In particular,
11336  * Return_Credit_Status[] is the only correct status check.
11337  *
11338  * if (reducing Global_Shared_Credit_Limit or any shared limit changing)
11339  *     set Global_Shared_Credit_Limit = 0
11340  *     use_all_vl = 1
11341  * mask0 = all VLs that are changing either dedicated or shared limits
11342  * set Shared_Limit[mask0] = 0
11343  * spin until Return_Credit_Status[use_all_vl ? all VL : mask0] == 0
11344  * if (changing any dedicated limit)
11345  *     mask1 = all VLs that are lowering dedicated limits
11346  *     lower Dedicated_Limit[mask1]
11347  *     spin until Return_Credit_Status[mask1] == 0
11348  *     raise Dedicated_Limits
11349  * raise Shared_Limits
11350  * raise Global_Shared_Credit_Limit
11351  *
11352  * lower = if the new limit is lower, set the limit to the new value
11353  * raise = if the new limit is higher than the current value (which may have
11354  *      been changed earlier in the algorithm), set the limit to the new value
11355  */
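/*
 * Illustrative walk-through of the algorithm above (assumed values, not a
 * required sequence): raising only VL0's dedicated limit, with no shared
 * limits changing, proceeds as
 *   - raise the global total credit limit to cover the new sum
 *   - zero VL0's shared limit and spin until its Return_Credit_Status clears
 *   - raise VL0's dedicated limit (nothing is being lowered)
 *   - restore VL0's shared limit; the global shared limit is unchanged
 */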
11356 int set_buffer_control(struct hfi1_pportdata *ppd,
11357                        struct buffer_control *new_bc)
11358 {
11359         struct hfi1_devdata *dd = ppd->dd;
11360         u64 changing_mask, ld_mask, stat_mask;
11361         int change_count;
11362         int i, use_all_mask;
11363         int this_shared_changing;
11364         int vl_count = 0, ret;
11365         /*
11366          * A0: add the variable any_shared_limit_changing below and in the
11367          * algorithm above.  If removing A0 support, it can be removed.
11368          */
11369         int any_shared_limit_changing;
11370         struct buffer_control cur_bc;
11371         u8 changing[OPA_MAX_VLS];
11372         u8 lowering_dedicated[OPA_MAX_VLS];
11373         u16 cur_total;
11374         u32 new_total = 0;
11375         const u64 all_mask =
11376         SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK
11377          | SEND_CM_CREDIT_USED_STATUS_VL1_RETURN_CREDIT_STATUS_SMASK
11378          | SEND_CM_CREDIT_USED_STATUS_VL2_RETURN_CREDIT_STATUS_SMASK
11379          | SEND_CM_CREDIT_USED_STATUS_VL3_RETURN_CREDIT_STATUS_SMASK
11380          | SEND_CM_CREDIT_USED_STATUS_VL4_RETURN_CREDIT_STATUS_SMASK
11381          | SEND_CM_CREDIT_USED_STATUS_VL5_RETURN_CREDIT_STATUS_SMASK
11382          | SEND_CM_CREDIT_USED_STATUS_VL6_RETURN_CREDIT_STATUS_SMASK
11383          | SEND_CM_CREDIT_USED_STATUS_VL7_RETURN_CREDIT_STATUS_SMASK
11384          | SEND_CM_CREDIT_USED_STATUS_VL15_RETURN_CREDIT_STATUS_SMASK;
11385
11386 #define valid_vl(idx) ((idx) < TXE_NUM_DATA_VL || (idx) == 15)
11387 #define NUM_USABLE_VLS 16       /* look at VL15 and less */
11388
11389         /* find the new total credits, do sanity check on unused VLs */
11390         for (i = 0; i < OPA_MAX_VLS; i++) {
11391                 if (valid_vl(i)) {
11392                         new_total += be16_to_cpu(new_bc->vl[i].dedicated);
11393                         continue;
11394                 }
11395                 nonzero_msg(dd, i, "dedicated",
11396                             be16_to_cpu(new_bc->vl[i].dedicated));
11397                 nonzero_msg(dd, i, "shared",
11398                             be16_to_cpu(new_bc->vl[i].shared));
11399                 new_bc->vl[i].dedicated = 0;
11400                 new_bc->vl[i].shared = 0;
11401         }
11402         new_total += be16_to_cpu(new_bc->overall_shared_limit);
11403
11404         /* fetch the current values */
11405         get_buffer_control(dd, &cur_bc, &cur_total);
11406
11407         /*
11408          * Create the masks we will use.
11409          */
11410         memset(changing, 0, sizeof(changing));
11411         memset(lowering_dedicated, 0, sizeof(lowering_dedicated));
11412         /*
11413          * NOTE: Assumes that the individual VL bits are adjacent and in
11414          * increasing order
11415          */
11416         stat_mask =
11417                 SEND_CM_CREDIT_USED_STATUS_VL0_RETURN_CREDIT_STATUS_SMASK;
11418         changing_mask = 0;
11419         ld_mask = 0;
11420         change_count = 0;
11421         any_shared_limit_changing = 0;
11422         for (i = 0; i < NUM_USABLE_VLS; i++, stat_mask <<= 1) {
11423                 if (!valid_vl(i))
11424                         continue;
11425                 this_shared_changing = new_bc->vl[i].shared
11426                                                 != cur_bc.vl[i].shared;
11427                 if (this_shared_changing)
11428                         any_shared_limit_changing = 1;
11429                 if (new_bc->vl[i].dedicated != cur_bc.vl[i].dedicated ||
11430                     this_shared_changing) {
11431                         changing[i] = 1;
11432                         changing_mask |= stat_mask;
11433                         change_count++;
11434                 }
11435                 if (be16_to_cpu(new_bc->vl[i].dedicated) <
11436                                         be16_to_cpu(cur_bc.vl[i].dedicated)) {
11437                         lowering_dedicated[i] = 1;
11438                         ld_mask |= stat_mask;
11439                 }
11440         }
11441
11442         /* bracket the credit change with a total adjustment */
11443         if (new_total > cur_total)
11444                 set_global_limit(dd, new_total);
11445
11446         /*
11447          * Start the credit change algorithm.
11448          */
11449         use_all_mask = 0;
11450         if ((be16_to_cpu(new_bc->overall_shared_limit) <
11451              be16_to_cpu(cur_bc.overall_shared_limit)) ||
11452             (is_ax(dd) && any_shared_limit_changing)) {
11453                 set_global_shared(dd, 0);
11454                 cur_bc.overall_shared_limit = 0;
11455                 use_all_mask = 1;
11456         }
11457
11458         for (i = 0; i < NUM_USABLE_VLS; i++) {
11459                 if (!valid_vl(i))
11460                         continue;
11461
11462                 if (changing[i]) {
11463                         set_vl_shared(dd, i, 0);
11464                         cur_bc.vl[i].shared = 0;
11465                 }
11466         }
11467
11468         wait_for_vl_status_clear(dd, use_all_mask ? all_mask : changing_mask,
11469                                  "shared");
11470
11471         if (change_count > 0) {
11472                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11473                         if (!valid_vl(i))
11474                                 continue;
11475
11476                         if (lowering_dedicated[i]) {
11477                                 set_vl_dedicated(dd, i,
11478                                                  be16_to_cpu(new_bc->
11479                                                              vl[i].dedicated));
11480                                 cur_bc.vl[i].dedicated =
11481                                                 new_bc->vl[i].dedicated;
11482                         }
11483                 }
11484
11485                 wait_for_vl_status_clear(dd, ld_mask, "dedicated");
11486
11487                 /* now raise all dedicated that are going up */
11488                 for (i = 0; i < NUM_USABLE_VLS; i++) {
11489                         if (!valid_vl(i))
11490                                 continue;
11491
11492                         if (be16_to_cpu(new_bc->vl[i].dedicated) >
11493                                         be16_to_cpu(cur_bc.vl[i].dedicated))
11494                                 set_vl_dedicated(dd, i,
11495                                                  be16_to_cpu(new_bc->
11496                                                              vl[i].dedicated));
11497                 }
11498         }
11499
11500         /* next raise all shared that are going up */
11501         for (i = 0; i < NUM_USABLE_VLS; i++) {
11502                 if (!valid_vl(i))
11503                         continue;
11504
11505                 if (be16_to_cpu(new_bc->vl[i].shared) >
11506                                 be16_to_cpu(cur_bc.vl[i].shared))
11507                         set_vl_shared(dd, i, be16_to_cpu(new_bc->vl[i].shared));
11508         }
11509
11510         /* finally raise the global shared */
11511         if (be16_to_cpu(new_bc->overall_shared_limit) >
11512             be16_to_cpu(cur_bc.overall_shared_limit))
11513                 set_global_shared(dd,
11514                                   be16_to_cpu(new_bc->overall_shared_limit));
11515
11516         /* bracket the credit change with a total adjustment */
11517         if (new_total < cur_total)
11518                 set_global_limit(dd, new_total);
11519
11520         /*
11521          * Determine the actual number of operational VLS using the number of
11522          * dedicated and shared credits for each VL.
11523          */
11524         if (change_count > 0) {
11525                 for (i = 0; i < TXE_NUM_DATA_VL; i++)
11526                         if (be16_to_cpu(new_bc->vl[i].dedicated) > 0 ||
11527                             be16_to_cpu(new_bc->vl[i].shared) > 0)
11528                                 vl_count++;
11529                 ppd->actual_vls_operational = vl_count;
11530                 ret = sdma_map_init(dd, ppd->port - 1, vl_count ?
11531                                     ppd->actual_vls_operational :
11532                                     ppd->vls_operational,
11533                                     NULL);
11534                 if (ret == 0)
11535                         ret = pio_map_init(dd, ppd->port - 1, vl_count ?
11536                                            ppd->actual_vls_operational :
11537                                            ppd->vls_operational, NULL);
11538                 if (ret)
11539                         return ret;
11540         }
11541         return 0;
11542 }
11543
11544 /*
11545  * Read the given fabric manager table. Return the size of the
11546  * table (in bytes) on success, and a negative error code on
11547  * failure.
11548  */
11549 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t)
11550
11551 {
11552         int size;
11553         struct vl_arb_cache *vlc;
11554
11555         switch (which) {
11556         case FM_TBL_VL_HIGH_ARB:
11557                 size = 256;
11558                 /*
11559                  * OPA specifies 128 elements (of 2 bytes each), though
11560                  * HFI supports only 16 elements in h/w.
11561                  */
11562                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11563                 vl_arb_get_cache(vlc, t);
11564                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11565                 break;
11566         case FM_TBL_VL_LOW_ARB:
11567                 size = 256;
11568                 /*
11569                  * OPA specifies 128 elements (of 2 bytes each), though
11570                  * HFI supports only 16 elements in h/w.
11571                  */
11572                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11573                 vl_arb_get_cache(vlc, t);
11574                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11575                 break;
11576         case FM_TBL_BUFFER_CONTROL:
11577                 size = get_buffer_control(ppd->dd, t, NULL);
11578                 break;
11579         case FM_TBL_SC2VLNT:
11580                 size = get_sc2vlnt(ppd->dd, t);
11581                 break;
11582         case FM_TBL_VL_PREEMPT_ELEMS:
11583                 size = 256;
11584                 /* OPA specifies 128 elements, of 2 bytes each */
11585                 get_vlarb_preempt(ppd->dd, OPA_MAX_VLS, t);
11586                 break;
11587         case FM_TBL_VL_PREEMPT_MATRIX:
11588                 size = 256;
11589                 /*
11590                  * OPA specifies that this is the same size as the VL
11591                  * arbitration tables (i.e., 256 bytes).
11592                  */
11593                 break;
11594         default:
11595                 return -EINVAL;
11596         }
11597         return size;
11598 }
11599
11600 /*
11601  * Write the given fabric manager table.
11602  */
11603 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t)
11604 {
11605         int ret = 0;
11606         struct vl_arb_cache *vlc;
11607
11608         switch (which) {
11609         case FM_TBL_VL_HIGH_ARB:
11610                 vlc = vl_arb_lock_cache(ppd, HI_PRIO_TABLE);
11611                 if (vl_arb_match_cache(vlc, t)) {
11612                         vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11613                         break;
11614                 }
11615                 vl_arb_set_cache(vlc, t);
11616                 vl_arb_unlock_cache(ppd, HI_PRIO_TABLE);
11617                 ret = set_vl_weights(ppd, SEND_HIGH_PRIORITY_LIST,
11618                                      VL_ARB_HIGH_PRIO_TABLE_SIZE, t);
11619                 break;
11620         case FM_TBL_VL_LOW_ARB:
11621                 vlc = vl_arb_lock_cache(ppd, LO_PRIO_TABLE);
11622                 if (vl_arb_match_cache(vlc, t)) {
11623                         vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11624                         break;
11625                 }
11626                 vl_arb_set_cache(vlc, t);
11627                 vl_arb_unlock_cache(ppd, LO_PRIO_TABLE);
11628                 ret = set_vl_weights(ppd, SEND_LOW_PRIORITY_LIST,
11629                                      VL_ARB_LOW_PRIO_TABLE_SIZE, t);
11630                 break;
11631         case FM_TBL_BUFFER_CONTROL:
11632                 ret = set_buffer_control(ppd, t);
11633                 break;
11634         case FM_TBL_SC2VLNT:
11635                 set_sc2vlnt(ppd->dd, t);
11636                 break;
11637         default:
11638                 ret = -EINVAL;
11639         }
11640         return ret;
11641 }
11642
11643 /*
11644  * Disable all data VLs.
11645  *
11646  * Return 0 if disabled, non-zero if the VLs cannot be disabled.
11647  */
11648 static int disable_data_vls(struct hfi1_devdata *dd)
11649 {
11650         if (is_ax(dd))
11651                 return 1;
11652
11653         pio_send_control(dd, PSC_DATA_VL_DISABLE);
11654
11655         return 0;
11656 }
11657
11658 /*
11659  * open_fill_data_vls() - the counterpart to stop_drain_data_vls().
11660  * Just re-enables all data VLs (the "fill" part happens
11661  * automatically - the name was chosen for symmetry with
11662  * stop_drain_data_vls()).
11663  *
11664  * Return 0 if successful, non-zero if the VLs cannot be enabled.
11665  */
11666 int open_fill_data_vls(struct hfi1_devdata *dd)
11667 {
11668         if (is_ax(dd))
11669                 return 1;
11670
11671         pio_send_control(dd, PSC_DATA_VL_ENABLE);
11672
11673         return 0;
11674 }
11675
11676 /*
11677  * drain_data_vls() - assumes that disable_data_vls() has been called;
11678  * waits for the occupancy of per-VL FIFOs (for all contexts) and of the
11679  * SDMA engines to drop to 0.
11680  */
11681 static void drain_data_vls(struct hfi1_devdata *dd)
11682 {
11683         sc_wait(dd);
11684         sdma_wait(dd);
11685         pause_for_credit_return(dd);
11686 }
11687
11688 /*
11689  * stop_drain_data_vls() - disable, then drain all per-VL fifos.
11690  *
11691  * Use open_fill_data_vls() to resume using data VLs.  This pair is
11692  * meant to be used like this:
11693  *
11694  * stop_drain_data_vls(dd);
11695  * // do things with per-VL resources
11696  * open_fill_data_vls(dd);
11697  */
11698 int stop_drain_data_vls(struct hfi1_devdata *dd)
11699 {
11700         int ret;
11701
11702         ret = disable_data_vls(dd);
11703         if (ret == 0)
11704                 drain_data_vls(dd);
11705
11706         return ret;
11707 }
11708
11709 /*
11710  * Convert a nanosecond time to a cclock count.  No matter how slow
11711  * the cclock, a non-zero ns will always have a non-zero result.
11712  */
11713 u32 ns_to_cclock(struct hfi1_devdata *dd, u32 ns)
11714 {
11715         u32 cclocks;
11716
11717         if (dd->icode == ICODE_FPGA_EMULATION)
11718                 cclocks = (ns * 1000) / FPGA_CCLOCK_PS;
11719         else  /* simulation pretends to be ASIC */
11720                 cclocks = (ns * 1000) / ASIC_CCLOCK_PS;
11721         if (ns && !cclocks)     /* if ns nonzero, must be at least 1 */
11722                 cclocks = 1;
11723         return cclocks;
11724 }
11725
11726 /*
11727  * Convert a cclock count to nanoseconds.  No matter how slow
11728  * the cclock, a non-zero cclock count will always have a non-zero result.
11729  */
11730 u32 cclock_to_ns(struct hfi1_devdata *dd, u32 cclocks)
11731 {
11732         u32 ns;
11733
11734         if (dd->icode == ICODE_FPGA_EMULATION)
11735                 ns = (cclocks * FPGA_CCLOCK_PS) / 1000;
11736         else  /* simulation pretends to be ASIC */
11737                 ns = (cclocks * ASIC_CCLOCK_PS) / 1000;
11738         if (cclocks && !ns)
11739                 ns = 1;
11740         return ns;
11741 }
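
/*
 * Worked example for the two conversions above (the cclock period is
 * illustrative, not the real ASIC_CCLOCK_PS value): with an 800 ps cclock,
 * ns_to_cclock(dd, 1) computes (1 * 1000) / 800 = 1 cclock, and
 * cclock_to_ns(dd, 1) computes (1 * 800) / 1000 = 0, which the final clamp
 * bumps to 1 ns.  The clamp is what guarantees the "non-zero in, non-zero
 * out" property documented above.
 */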
11742
11743 /*
11744  * Dynamically adjust the receive interrupt timeout for a context based on
11745  * incoming packet rate.
11746  *
11747  * NOTE: Dynamic adjustment does not allow rcv_intr_count to be zero.
11748  */
11749 static void adjust_rcv_timeout(struct hfi1_ctxtdata *rcd, u32 npkts)
11750 {
11751         struct hfi1_devdata *dd = rcd->dd;
11752         u32 timeout = rcd->rcvavail_timeout;
11753
11754         /*
11755          * This algorithm doubles or halves the timeout depending on whether
11756          * the number of packets received in this interrupt was less than or
11757          * greater than or equal to the interrupt count.
11758          *
11759          * The calculations below do not allow a steady state to be achieved.
11760          * Only at the endpoints is it possible to have an unchanging
11761          * timeout.
11762          */
11763         if (npkts < rcv_intr_count) {
11764                 /*
11765                  * Not enough packets arrived before the timeout, adjust
11766                  * timeout downward.
11767                  */
11768                 if (timeout < 2) /* already at minimum? */
11769                         return;
11770                 timeout >>= 1;
11771         } else {
11772                 /*
11773                  * More than enough packets arrived before the timeout, adjust
11774                  * timeout upward.
11775                  */
11776                 if (timeout >= dd->rcv_intr_timeout_csr) /* already at max? */
11777                         return;
11778                 timeout = min(timeout << 1, dd->rcv_intr_timeout_csr);
11779         }
11780
11781         rcd->rcvavail_timeout = timeout;
11782         /*
11783          * timeout cannot be larger than rcv_intr_timeout_csr which has already
11784          * been verified to be in range
11785          */
11786         write_kctxt_csr(dd, rcd->ctxt, RCV_AVAIL_TIME_OUT,
11787                         (u64)timeout <<
11788                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
11789 }
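
/*
 * Example of the doubling/halving behavior above (numbers are illustrative):
 * with rcv_intr_count == 16 and a current timeout of 8, an interrupt that
 * delivered 4 packets halves the timeout to 4, while one that delivered 20
 * packets doubles it to 16 (clamped at dd->rcv_intr_timeout_csr).  Halving
 * stops once the timeout reaches 1, which is the only stable low endpoint.
 */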
11790
11791 void update_usrhead(struct hfi1_ctxtdata *rcd, u32 hd, u32 updegr, u32 egrhd,
11792                     u32 intr_adjust, u32 npkts)
11793 {
11794         struct hfi1_devdata *dd = rcd->dd;
11795         u64 reg;
11796         u32 ctxt = rcd->ctxt;
11797
11798         /*
11799          * Need to write timeout register before updating RcvHdrHead to ensure
11800          * that a new value is used when the HW decides to restart counting.
11801          */
11802         if (intr_adjust)
11803                 adjust_rcv_timeout(rcd, npkts);
11804         if (updegr) {
11805                 reg = (egrhd & RCV_EGR_INDEX_HEAD_HEAD_MASK)
11806                         << RCV_EGR_INDEX_HEAD_HEAD_SHIFT;
11807                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, reg);
11808         }
11809         reg = ((u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT) |
11810                 (((u64)hd & RCV_HDR_HEAD_HEAD_MASK)
11811                         << RCV_HDR_HEAD_HEAD_SHIFT);
11812         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
11813 }
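
/*
 * Illustrative packing of the RcvHdrHead write above (field positions come
 * from the SHIFT/MASK macros, the values are made up): with
 * rcv_intr_count == 16 and hd == 0x40, the register value is
 * (16 << RCV_HDR_HEAD_COUNTER_SHIFT) | (0x40 << RCV_HDR_HEAD_HEAD_SHIFT).
 * Writing the interrupt timeout first, as noted above, ensures the hardware
 * restarts its counting with the new timeout value.
 */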
11814
11815 u32 hdrqempty(struct hfi1_ctxtdata *rcd)
11816 {
11817         u32 head, tail;
11818
11819         head = (read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_HEAD)
11820                 & RCV_HDR_HEAD_HEAD_SMASK) >> RCV_HDR_HEAD_HEAD_SHIFT;
11821
11822         if (rcd->rcvhdrtail_kvaddr)
11823                 tail = get_rcvhdrtail(rcd);
11824         else
11825                 tail = read_uctxt_csr(rcd->dd, rcd->ctxt, RCV_HDR_TAIL);
11826
11827         return head == tail;
11828 }
11829
11830 /*
11831  * Context Control and Receive Array encoding for buffer size:
11832  *      0x0 invalid
11833  *      0x1   4 KB
11834  *      0x2   8 KB
11835  *      0x3  16 KB
11836  *      0x4  32 KB
11837  *      0x5  64 KB
11838  *      0x6 128 KB
11839  *      0x7 256 KB
11840  *      0x8 512 KB (Receive Array only)
11841  *      0x9   1 MB (Receive Array only)
11842  *      0xa   2 MB (Receive Array only)
11843  *
11844  *      0xB-0xF - reserved (Receive Array only)
11845  *
11846  *
11847  * This routine assumes that the value has already been sanity checked.
11848  */
11849 static u32 encoded_size(u32 size)
11850 {
11851         switch (size) {
11852         case   4 * 1024: return 0x1;
11853         case   8 * 1024: return 0x2;
11854         case  16 * 1024: return 0x3;
11855         case  32 * 1024: return 0x4;
11856         case  64 * 1024: return 0x5;
11857         case 128 * 1024: return 0x6;
11858         case 256 * 1024: return 0x7;
11859         case 512 * 1024: return 0x8;
11860         case   1 * 1024 * 1024: return 0x9;
11861         case   2 * 1024 * 1024: return 0xa;
11862         }
11863         return 0x1;     /* if invalid, go with the minimum size */
11864 }
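
/*
 * Usage sketch (illustrative sizes): encoded_size(64 * 1024) returns 0x5 per
 * the table above, which hfi1_rcvctrl() below shifts into the
 * RcvCtxtCtrl.EgrBufSize field; a value not in the table, e.g. 3000, falls
 * back to the 4 KB encoding (0x1).
 */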
11865
11866 void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
11867                   struct hfi1_ctxtdata *rcd)
11868 {
11869         u64 rcvctrl, reg;
11870         int did_enable = 0;
11871         u16 ctxt;
11872
11873         if (!rcd)
11874                 return;
11875
11876         ctxt = rcd->ctxt;
11877
11878         hfi1_cdbg(RCVCTRL, "ctxt %d op 0x%x", ctxt, op);
11879
11880         rcvctrl = read_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL);
11881         /* if the context is already enabled, don't do the extra steps */
11882         if ((op & HFI1_RCVCTRL_CTXT_ENB) &&
11883             !(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
11884                 /* reset the tail and hdr addresses, and sequence count */
11885                 write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
11886                                 rcd->rcvhdrq_dma);
11887                 if (rcd->rcvhdrtail_kvaddr)
11888                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11889                                         rcd->rcvhdrqtailaddr_dma);
11890                 rcd->seq_cnt = 1;
11891
11892                 /* reset the cached receive header queue head value */
11893                 rcd->head = 0;
11894
11895                 /*
11896                  * Zero the receive header queue so we don't get false
11897                  * positives when checking the sequence number.  The
11898                  * sequence numbers could land exactly on the same spot.
11899                  * E.g. an rcd restart before the receive header wrapped.
11900                  */
11901                 memset(rcd->rcvhdrq, 0, rcvhdrq_size(rcd));
11902
11903                 /* starting timeout */
11904                 rcd->rcvavail_timeout = dd->rcv_intr_timeout_csr;
11905
11906                 /* enable the context */
11907                 rcvctrl |= RCV_CTXT_CTRL_ENABLE_SMASK;
11908
11909                 /* clean the egr buffer size first */
11910                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11911                 rcvctrl |= ((u64)encoded_size(rcd->egrbufs.rcvtid_size)
11912                                 & RCV_CTXT_CTRL_EGR_BUF_SIZE_MASK)
11913                                         << RCV_CTXT_CTRL_EGR_BUF_SIZE_SHIFT;
11914
11915                 /* zero RcvHdrHead - set RcvHdrHead.Counter after enable */
11916                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0);
11917                 did_enable = 1;
11918
11919                 /* zero RcvEgrIndexHead */
11920                 write_uctxt_csr(dd, ctxt, RCV_EGR_INDEX_HEAD, 0);
11921
11922                 /* set eager count and base index */
11923                 reg = (((u64)(rcd->egrbufs.alloced >> RCV_SHIFT)
11924                         & RCV_EGR_CTRL_EGR_CNT_MASK)
11925                        << RCV_EGR_CTRL_EGR_CNT_SHIFT) |
11926                         (((rcd->eager_base >> RCV_SHIFT)
11927                           & RCV_EGR_CTRL_EGR_BASE_INDEX_MASK)
11928                          << RCV_EGR_CTRL_EGR_BASE_INDEX_SHIFT);
11929                 write_kctxt_csr(dd, ctxt, RCV_EGR_CTRL, reg);
11930
11931                 /*
11932                  * Set TID (expected) count and base index.
11933                  * rcd->expected_count is set to individual RcvArray entries,
11934                  * not pairs, and the CSR takes a pair-count in groups of
11935                  * four, so divide by 8.
11936                  */
11937                 reg = (((rcd->expected_count >> RCV_SHIFT)
11938                                         & RCV_TID_CTRL_TID_PAIR_CNT_MASK)
11939                                 << RCV_TID_CTRL_TID_PAIR_CNT_SHIFT) |
11940                       (((rcd->expected_base >> RCV_SHIFT)
11941                                         & RCV_TID_CTRL_TID_BASE_INDEX_MASK)
11942                                 << RCV_TID_CTRL_TID_BASE_INDEX_SHIFT);
11943                 write_kctxt_csr(dd, ctxt, RCV_TID_CTRL, reg);
11944                 if (ctxt == HFI1_CTRL_CTXT)
11945                         write_csr(dd, RCV_VL15, HFI1_CTRL_CTXT);
11946         }
11947         if (op & HFI1_RCVCTRL_CTXT_DIS) {
11948                 write_csr(dd, RCV_VL15, 0);
11949                 /*
11950                  * When a receive context is being disabled, turn on tail
11951                  * update with a dummy tail address and then disable the
11952                  * receive context.
11953                  */
11954                 if (dd->rcvhdrtail_dummy_dma) {
11955                         write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
11956                                         dd->rcvhdrtail_dummy_dma);
11957                         /* Enabling RcvCtxtCtrl.TailUpd is intentional. */
11958                         rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11959                 }
11960
11961                 rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
11962         }
11963         if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
11964                 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11965                               IS_RCVAVAIL_START + rcd->ctxt, true);
11966                 rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11967         }
11968         if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
11969                 set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
11970                               IS_RCVAVAIL_START + rcd->ctxt, false);
11971                 rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
11972         }
11973         if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
11974                 rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11975         if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
11976                 /* See comment on RcvCtxtCtrl.TailUpd above */
11977                 if (!(op & HFI1_RCVCTRL_CTXT_DIS))
11978                         rcvctrl &= ~RCV_CTXT_CTRL_TAIL_UPD_SMASK;
11979         }
11980         if (op & HFI1_RCVCTRL_TIDFLOW_ENB)
11981                 rcvctrl |= RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11982         if (op & HFI1_RCVCTRL_TIDFLOW_DIS)
11983                 rcvctrl &= ~RCV_CTXT_CTRL_TID_FLOW_ENABLE_SMASK;
11984         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_ENB) {
11985                 /*
11986                  * In one-packet-per-eager mode, the size comes from
11987                  * the RcvArray entry.
11988                  */
11989                 rcvctrl &= ~RCV_CTXT_CTRL_EGR_BUF_SIZE_SMASK;
11990                 rcvctrl |= RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11991         }
11992         if (op & HFI1_RCVCTRL_ONE_PKT_EGR_DIS)
11993                 rcvctrl &= ~RCV_CTXT_CTRL_ONE_PACKET_PER_EGR_BUFFER_SMASK;
11994         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_ENB)
11995                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11996         if (op & HFI1_RCVCTRL_NO_RHQ_DROP_DIS)
11997                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK;
11998         if (op & HFI1_RCVCTRL_NO_EGR_DROP_ENB)
11999                 rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12000         if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
12001                 rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
12002         if (op & HFI1_RCVCTRL_URGENT_ENB)
12003                 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12004                               IS_RCVURGENT_START + rcd->ctxt, true);
12005         if (op & HFI1_RCVCTRL_URGENT_DIS)
12006                 set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
12007                               IS_RCVURGENT_START + rcd->ctxt, false);
12008
12009         hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
12010         write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
12011
12012         /* work around sticky RcvCtxtStatus.BlockedRHQFull */
12013         if (did_enable &&
12014             (rcvctrl & RCV_CTXT_CTRL_DONT_DROP_RHQ_FULL_SMASK)) {
12015                 reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12016                 if (reg != 0) {
12017                         dd_dev_info(dd, "ctxt %d status %lld (blocked)\n",
12018                                     ctxt, reg);
12019                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12020                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x10);
12021                         write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, 0x00);
12022                         read_uctxt_csr(dd, ctxt, RCV_HDR_HEAD);
12023                         reg = read_kctxt_csr(dd, ctxt, RCV_CTXT_STATUS);
12024                         dd_dev_info(dd, "ctxt %d status %lld (%s blocked)\n",
12025                                     ctxt, reg, reg == 0 ? "not" : "still");
12026                 }
12027         }
12028
12029         if (did_enable) {
12030                 /*
12031                  * The interrupt timeout and count must be set after
12032                  * the context is enabled to take effect.
12033                  */
12034                 /* set interrupt timeout */
12035                 write_kctxt_csr(dd, ctxt, RCV_AVAIL_TIME_OUT,
12036                                 (u64)rcd->rcvavail_timeout <<
12037                                 RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_SHIFT);
12038
12039                 /* set RcvHdrHead.Counter, zero RcvHdrHead.Head (again) */
12040                 reg = (u64)rcv_intr_count << RCV_HDR_HEAD_COUNTER_SHIFT;
12041                 write_uctxt_csr(dd, ctxt, RCV_HDR_HEAD, reg);
12042         }
12043
12044         if (op & (HFI1_RCVCTRL_TAILUPD_DIS | HFI1_RCVCTRL_CTXT_DIS))
12045                 /*
12046                  * If the context has been disabled and the Tail Update has
12047                  * been cleared, set the RCV_HDR_TAIL_ADDR CSR to dummy address
12048                  * so it doesn't contain an address that is invalid.
12049                  */
12050                 write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
12051                                 dd->rcvhdrtail_dummy_dma);
12052 }
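
/*
 * Illustrative call (values are hypothetical): each bit in @op is handled
 * independently by the if-chains above, so enabling a context together with
 * its receive-available interrupt looks like
 *
 *     hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
 *                  rcd);
 */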
12053
12054 u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
12055 {
12056         int ret;
12057         u64 val = 0;
12058
12059         if (namep) {
12060                 ret = dd->cntrnameslen;
12061                 *namep = dd->cntrnames;
12062         } else {
12063                 const struct cntr_entry *entry;
12064                 int i, j;
12065
12066                 ret = (dd->ndevcntrs) * sizeof(u64);
12067
12068                 /* Get the start of the block of counters */
12069                 *cntrp = dd->cntrs;
12070
12071                 /*
12072                  * Now go and fill in each counter in the block.
12073                  */
12074                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12075                         entry = &dev_cntrs[i];
12076                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12077                         if (entry->flags & CNTR_DISABLED) {
12078                                 /* Nothing */
12079                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12080                         } else {
12081                                 if (entry->flags & CNTR_VL) {
12082                                         hfi1_cdbg(CNTR, "\tPer VL\n");
12083                                         for (j = 0; j < C_VL_COUNT; j++) {
12084                                                 val = entry->rw_cntr(entry,
12085                                                                   dd, j,
12086                                                                   CNTR_MODE_R,
12087                                                                   0);
12088                                                 hfi1_cdbg(
12089                                                    CNTR,
12090                                                    "\t\tRead 0x%llx for %d\n",
12091                                                    val, j);
12092                                                 dd->cntrs[entry->offset + j] =
12093                                                                             val;
12094                                         }
12095                                 } else if (entry->flags & CNTR_SDMA) {
12096                                         hfi1_cdbg(CNTR,
12097                                                   "\t Per SDMA Engine\n");
12098                                         for (j = 0; j < chip_sdma_engines(dd);
12099                                              j++) {
12100                                                 val =
12101                                                 entry->rw_cntr(entry, dd, j,
12102                                                                CNTR_MODE_R, 0);
12103                                                 hfi1_cdbg(CNTR,
12104                                                           "\t\tRead 0x%llx for %d\n",
12105                                                           val, j);
12106                                                 dd->cntrs[entry->offset + j] =
12107                                                                         val;
12108                                         }
12109                                 } else {
12110                                         val = entry->rw_cntr(entry, dd,
12111                                                         CNTR_INVALID_VL,
12112                                                         CNTR_MODE_R, 0);
12113                                         dd->cntrs[entry->offset] = val;
12114                                         hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12115                                 }
12116                         }
12117                 }
12118         }
12119         return ret;
12120 }
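
/*
 * Illustrative use of the two-call pattern above (hypothetical caller code):
 *
 *     char *names;
 *     u64 *vals;
 *     u32 names_len = hfi1_read_cntrs(dd, &names, NULL);
 *     u32 vals_len  = hfi1_read_cntrs(dd, NULL, &vals);
 *
 * Passing @namep returns the newline-separated name buffer built by
 * init_cntrs(); passing @cntrp instead refreshes and returns the value block.
 */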
12121
12122 /*
12123  * Used by sysfs to create files for hfi stats to read
12124  */
12125 u32 hfi1_read_portcntrs(struct hfi1_pportdata *ppd, char **namep, u64 **cntrp)
12126 {
12127         int ret;
12128         u64 val = 0;
12129
12130         if (namep) {
12131                 ret = ppd->dd->portcntrnameslen;
12132                 *namep = ppd->dd->portcntrnames;
12133         } else {
12134                 const struct cntr_entry *entry;
12135                 int i, j;
12136
12137                 ret = ppd->dd->nportcntrs * sizeof(u64);
12138                 *cntrp = ppd->cntrs;
12139
12140                 for (i = 0; i < PORT_CNTR_LAST; i++) {
12141                         entry = &port_cntrs[i];
12142                         hfi1_cdbg(CNTR, "reading %s", entry->name);
12143                         if (entry->flags & CNTR_DISABLED) {
12144                                 /* Nothing */
12145                                 hfi1_cdbg(CNTR, "\tDisabled\n");
12146                                 continue;
12147                         }
12148
12149                         if (entry->flags & CNTR_VL) {
12150                                 hfi1_cdbg(CNTR, "\tPer VL");
12151                                 for (j = 0; j < C_VL_COUNT; j++) {
12152                                         val = entry->rw_cntr(entry, ppd, j,
12153                                                                CNTR_MODE_R,
12154                                                                0);
12155                                         hfi1_cdbg(
12156                                            CNTR,
12157                                            "\t\tRead 0x%llx for %d",
12158                                            val, j);
12159                                         ppd->cntrs[entry->offset + j] = val;
12160                                 }
12161                         } else {
12162                                 val = entry->rw_cntr(entry, ppd,
12163                                                        CNTR_INVALID_VL,
12164                                                        CNTR_MODE_R,
12165                                                        0);
12166                                 ppd->cntrs[entry->offset] = val;
12167                                 hfi1_cdbg(CNTR, "\tRead 0x%llx", val);
12168                         }
12169                 }
12170         }
12171         return ret;
12172 }
12173
12174 static void free_cntrs(struct hfi1_devdata *dd)
12175 {
12176         struct hfi1_pportdata *ppd;
12177         int i;
12178
12179         if (dd->synth_stats_timer.function)
12180                 del_timer_sync(&dd->synth_stats_timer);
12181         ppd = (struct hfi1_pportdata *)(dd + 1);
12182         for (i = 0; i < dd->num_pports; i++, ppd++) {
12183                 kfree(ppd->cntrs);
12184                 kfree(ppd->scntrs);
12185                 free_percpu(ppd->ibport_data.rvp.rc_acks);
12186                 free_percpu(ppd->ibport_data.rvp.rc_qacks);
12187                 free_percpu(ppd->ibport_data.rvp.rc_delayed_comp);
12188                 ppd->cntrs = NULL;
12189                 ppd->scntrs = NULL;
12190                 ppd->ibport_data.rvp.rc_acks = NULL;
12191                 ppd->ibport_data.rvp.rc_qacks = NULL;
12192                 ppd->ibport_data.rvp.rc_delayed_comp = NULL;
12193         }
12194         kfree(dd->portcntrnames);
12195         dd->portcntrnames = NULL;
12196         kfree(dd->cntrs);
12197         dd->cntrs = NULL;
12198         kfree(dd->scntrs);
12199         dd->scntrs = NULL;
12200         kfree(dd->cntrnames);
12201         dd->cntrnames = NULL;
12202         if (dd->update_cntr_wq) {
12203                 destroy_workqueue(dd->update_cntr_wq);
12204                 dd->update_cntr_wq = NULL;
12205         }
12206 }
12207
12208 static u64 read_dev_port_cntr(struct hfi1_devdata *dd, struct cntr_entry *entry,
12209                               u64 *psval, void *context, int vl)
12210 {
12211         u64 val;
12212         u64 sval = *psval;
12213
12214         if (entry->flags & CNTR_DISABLED) {
12215                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12216                 return 0;
12217         }
12218
12219         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12220
12221         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_R, 0);
12222
12223         /* If it's a synthetic counter there is more work we need to do */
12224         if (entry->flags & CNTR_SYNTH) {
12225                 if (sval == CNTR_MAX) {
12226                         /* No need to read already saturated */
12227                         return CNTR_MAX;
12228                 }
12229
12230                 if (entry->flags & CNTR_32BIT) {
12231                         /* 32bit counters can wrap multiple times */
12232                         u64 upper = sval >> 32;
12233                         u64 lower = (sval << 32) >> 32;
12234
12235                         if (lower > val) { /* hw wrapped */
12236                                 if (upper == CNTR_32BIT_MAX)
12237                                         val = CNTR_MAX;
12238                                 else
12239                                         upper++;
12240                         }
12241
12242                         if (val != CNTR_MAX)
12243                                 val = (upper << 32) | val;
12244
12245                 } else {
12246                         /* If we rolled we are saturated */
12247                         if ((val < sval) || (val > CNTR_MAX))
12248                                 val = CNTR_MAX;
12249                 }
12250         }
12251
12252         *psval = val;
12253
12254         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12255
12256         return val;
12257 }
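
/*
 * Worked example of the 32-bit wrap handling above (illustrative values):
 * if the saturating software value is sval = 0x1fffffff0 (upper = 1,
 * lower = 0xfffffff0) and the hardware now reads val = 0x10, then
 * lower > val flags a wrap, upper becomes 2, and the stored result is
 * (2 << 32) | 0x10 = 0x200000010.
 */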
12258
12259 static u64 write_dev_port_cntr(struct hfi1_devdata *dd,
12260                                struct cntr_entry *entry,
12261                                u64 *psval, void *context, int vl, u64 data)
12262 {
12263         u64 val;
12264
12265         if (entry->flags & CNTR_DISABLED) {
12266                 dd_dev_err(dd, "Counter %s not enabled", entry->name);
12267                 return 0;
12268         }
12269
12270         hfi1_cdbg(CNTR, "cntr: %s vl %d psval 0x%llx", entry->name, vl, *psval);
12271
12272         if (entry->flags & CNTR_SYNTH) {
12273                 *psval = data;
12274                 if (entry->flags & CNTR_32BIT) {
12275                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12276                                              (data << 32) >> 32);
12277                         val = data; /* return the full 64bit value */
12278                 } else {
12279                         val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W,
12280                                              data);
12281                 }
12282         } else {
12283                 val = entry->rw_cntr(entry, context, vl, CNTR_MODE_W, data);
12284         }
12285
12286         *psval = val;
12287
12288         hfi1_cdbg(CNTR, "\tNew val=0x%llx", val);
12289
12290         return val;
12291 }
12292
12293 u64 read_dev_cntr(struct hfi1_devdata *dd, int index, int vl)
12294 {
12295         struct cntr_entry *entry;
12296         u64 *sval;
12297
12298         entry = &dev_cntrs[index];
12299         sval = dd->scntrs + entry->offset;
12300
12301         if (vl != CNTR_INVALID_VL)
12302                 sval += vl;
12303
12304         return read_dev_port_cntr(dd, entry, sval, dd, vl);
12305 }
12306
12307 u64 write_dev_cntr(struct hfi1_devdata *dd, int index, int vl, u64 data)
12308 {
12309         struct cntr_entry *entry;
12310         u64 *sval;
12311
12312         entry = &dev_cntrs[index];
12313         sval = dd->scntrs + entry->offset;
12314
12315         if (vl != CNTR_INVALID_VL)
12316                 sval += vl;
12317
12318         return write_dev_port_cntr(dd, entry, sval, dd, vl, data);
12319 }
12320
12321 u64 read_port_cntr(struct hfi1_pportdata *ppd, int index, int vl)
12322 {
12323         struct cntr_entry *entry;
12324         u64 *sval;
12325
12326         entry = &port_cntrs[index];
12327         sval = ppd->scntrs + entry->offset;
12328
12329         if (vl != CNTR_INVALID_VL)
12330                 sval += vl;
12331
12332         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12333             (index <= C_RCV_HDR_OVF_LAST)) {
12334                 /* We do not want to bother for disabled contexts */
12335                 return 0;
12336         }
12337
12338         return read_dev_port_cntr(ppd->dd, entry, sval, ppd, vl);
12339 }
12340
12341 u64 write_port_cntr(struct hfi1_pportdata *ppd, int index, int vl, u64 data)
12342 {
12343         struct cntr_entry *entry;
12344         u64 *sval;
12345
12346         entry = &port_cntrs[index];
12347         sval = ppd->scntrs + entry->offset;
12348
12349         if (vl != CNTR_INVALID_VL)
12350                 sval += vl;
12351
12352         if ((index >= C_RCV_HDR_OVF_FIRST + ppd->dd->num_rcv_contexts) &&
12353             (index <= C_RCV_HDR_OVF_LAST)) {
12354                 /* We do not want to bother for disabled contexts */
12355                 return 0;
12356         }
12357
12358         return write_dev_port_cntr(ppd->dd, entry, sval, ppd, vl, data);
12359 }
12360
12361 static void do_update_synth_timer(struct work_struct *work)
12362 {
12363         u64 cur_tx;
12364         u64 cur_rx;
12365         u64 total_flits;
12366         u8 update = 0;
12367         int i, j, vl;
12368         struct hfi1_pportdata *ppd;
12369         struct cntr_entry *entry;
12370         struct hfi1_devdata *dd = container_of(work, struct hfi1_devdata,
12371                                                update_cntr_work);
12372
12373         /*
12374          * Rather than keep beating on the CSRs, pick a minimal set that we can
12375          * check to watch for potential rollover. We can do this by looking at
12376          * the number of flits sent/received. If the total flits exceeds 32 bits
12377          * then we have to iterate over all the counters and update them.
12378          */
12379         entry = &dev_cntrs[C_DC_RCV_FLITS];
12380         cur_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12381
12382         entry = &dev_cntrs[C_DC_XMIT_FLITS];
12383         cur_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL, CNTR_MODE_R, 0);
12384
12385         hfi1_cdbg(
12386             CNTR,
12387             "[%d] curr tx=0x%llx rx=0x%llx :: last tx=0x%llx rx=0x%llx\n",
12388             dd->unit, cur_tx, cur_rx, dd->last_tx, dd->last_rx);
12389
12390         if ((cur_tx < dd->last_tx) || (cur_rx < dd->last_rx)) {
12391                 /*
12392                  * May not be strictly necessary to update but it won't hurt and
12393                  * simplifies the logic here.
12394                  */
12395                 update = 1;
12396                 hfi1_cdbg(CNTR, "[%d] Tripwire counter rolled, updating",
12397                           dd->unit);
12398         } else {
12399                 total_flits = (cur_tx - dd->last_tx) + (cur_rx - dd->last_rx);
12400                 hfi1_cdbg(CNTR,
12401                           "[%d] total flits 0x%llx limit 0x%llx\n", dd->unit,
12402                           total_flits, (u64)CNTR_32BIT_MAX);
12403                 if (total_flits >= CNTR_32BIT_MAX) {
12404                         hfi1_cdbg(CNTR, "[%d] 32bit limit hit, updating",
12405                                   dd->unit);
12406                         update = 1;
12407                 }
12408         }
12409
12410         if (update) {
12411                 hfi1_cdbg(CNTR, "[%d] Updating dd and ppd counters", dd->unit);
12412                 for (i = 0; i < DEV_CNTR_LAST; i++) {
12413                         entry = &dev_cntrs[i];
12414                         if (entry->flags & CNTR_VL) {
12415                                 for (vl = 0; vl < C_VL_COUNT; vl++)
12416                                         read_dev_cntr(dd, i, vl);
12417                         } else {
12418                                 read_dev_cntr(dd, i, CNTR_INVALID_VL);
12419                         }
12420                 }
12421                 ppd = (struct hfi1_pportdata *)(dd + 1);
12422                 for (i = 0; i < dd->num_pports; i++, ppd++) {
12423                         for (j = 0; j < PORT_CNTR_LAST; j++) {
12424                                 entry = &port_cntrs[j];
12425                                 if (entry->flags & CNTR_VL) {
12426                                         for (vl = 0; vl < C_VL_COUNT; vl++)
12427                                                 read_port_cntr(ppd, j, vl);
12428                                 } else {
12429                                         read_port_cntr(ppd, j, CNTR_INVALID_VL);
12430                                 }
12431                         }
12432                 }
12433
12434                 /*
12435                  * We want the value in the register. The goal is to keep track
12436                  * of the number of "ticks" not the counter value. In other
12437                  * words if the register rolls we want to notice it and go ahead
12438                  * and force an update.
12439                  */
12440                 entry = &dev_cntrs[C_DC_XMIT_FLITS];
12441                 dd->last_tx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12442                                                 CNTR_MODE_R, 0);
12443
12444                 entry = &dev_cntrs[C_DC_RCV_FLITS];
12445                 dd->last_rx = entry->rw_cntr(entry, dd, CNTR_INVALID_VL,
12446                                                 CNTR_MODE_R, 0);
12447
12448                 hfi1_cdbg(CNTR, "[%d] setting last tx/rx to 0x%llx 0x%llx",
12449                           dd->unit, dd->last_tx, dd->last_rx);
12450
12451         } else {
12452                 hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit);
12453         }
12454 }
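
/*
 * Illustrative numbers for the tripwire logic above: if last_tx/last_rx were
 * 0x1000/0x2000 and the current reads are 0x90000000/0x80000000, total_flits
 * is 0x10fffd000, which exceeds the 32-bit limit, so every device and port
 * counter is re-read before a 32-bit hardware counter could wrap undetected.
 */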
12455
12456 static void update_synth_timer(struct timer_list *t)
12457 {
12458         struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
12459
12460         queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
12461         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12462 }
12463
12464 #define C_MAX_NAME 16 /* 15 chars + one for \0 */
12465 static int init_cntrs(struct hfi1_devdata *dd)
12466 {
12467         int i, rcv_ctxts, j;
12468         size_t sz;
12469         char *p;
12470         char name[C_MAX_NAME];
12471         struct hfi1_pportdata *ppd;
12472         const char *bit_type_32 = ",32";
12473         const int bit_type_32_sz = strlen(bit_type_32);
12474         u32 sdma_engines = chip_sdma_engines(dd);
12475
12476         /* set up the stats timer; the add_timer is done at the end */
12477         timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
12478
12479         /***********************/
12480         /* per device counters */
12481         /***********************/
12482
12483         /* size names and determine how many we have */
12484         dd->ndevcntrs = 0;
12485         sz = 0;
12486
12487         for (i = 0; i < DEV_CNTR_LAST; i++) {
12488                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12489                         hfi1_dbg_early("\tSkipping %s\n", dev_cntrs[i].name);
12490                         continue;
12491                 }
12492
12493                 if (dev_cntrs[i].flags & CNTR_VL) {
12494                         dev_cntrs[i].offset = dd->ndevcntrs;
12495                         for (j = 0; j < C_VL_COUNT; j++) {
12496                                 snprintf(name, C_MAX_NAME, "%s%d",
12497                                          dev_cntrs[i].name, vl_from_idx(j));
12498                                 sz += strlen(name);
12499                                 /* Add ",32" for 32-bit counters */
12500                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12501                                         sz += bit_type_32_sz;
12502                                 sz++;
12503                                 dd->ndevcntrs++;
12504                         }
12505                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12506                         dev_cntrs[i].offset = dd->ndevcntrs;
12507                         for (j = 0; j < sdma_engines; j++) {
12508                                 snprintf(name, C_MAX_NAME, "%s%d",
12509                                          dev_cntrs[i].name, j);
12510                                 sz += strlen(name);
12511                                 /* Add ",32" for 32-bit counters */
12512                                 if (dev_cntrs[i].flags & CNTR_32BIT)
12513                                         sz += bit_type_32_sz;
12514                                 sz++;
12515                                 dd->ndevcntrs++;
12516                         }
12517                 } else {
12518                         /* +1 for newline. */
12519                         sz += strlen(dev_cntrs[i].name) + 1;
12520                         /* Add ",32" for 32-bit counters */
12521                         if (dev_cntrs[i].flags & CNTR_32BIT)
12522                                 sz += bit_type_32_sz;
12523                         dev_cntrs[i].offset = dd->ndevcntrs;
12524                         dd->ndevcntrs++;
12525                 }
12526         }
12527
12528         /* allocate space for the counter values */
12529         dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
12530                             GFP_KERNEL);
12531         if (!dd->cntrs)
12532                 goto bail;
12533
12534         dd->scntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
12535         if (!dd->scntrs)
12536                 goto bail;
12537
12538         /* allocate space for the counter names */
12539         dd->cntrnameslen = sz;
12540         dd->cntrnames = kmalloc(sz, GFP_KERNEL);
12541         if (!dd->cntrnames)
12542                 goto bail;
12543
12544         /* fill in the names */
12545         for (p = dd->cntrnames, i = 0; i < DEV_CNTR_LAST; i++) {
12546                 if (dev_cntrs[i].flags & CNTR_DISABLED) {
12547                         /* Nothing */
12548                 } else if (dev_cntrs[i].flags & CNTR_VL) {
12549                         for (j = 0; j < C_VL_COUNT; j++) {
12550                                 snprintf(name, C_MAX_NAME, "%s%d",
12551                                          dev_cntrs[i].name,
12552                                          vl_from_idx(j));
12553                                 memcpy(p, name, strlen(name));
12554                                 p += strlen(name);
12555
12556                                 /* Counter is 32 bits */
12557                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12558                                         memcpy(p, bit_type_32, bit_type_32_sz);
12559                                         p += bit_type_32_sz;
12560                                 }
12561
12562                                 *p++ = '\n';
12563                         }
12564                 } else if (dev_cntrs[i].flags & CNTR_SDMA) {
12565                         for (j = 0; j < sdma_engines; j++) {
12566                                 snprintf(name, C_MAX_NAME, "%s%d",
12567                                          dev_cntrs[i].name, j);
12568                                 memcpy(p, name, strlen(name));
12569                                 p += strlen(name);
12570
12571                                 /* Counter is 32 bits */
12572                                 if (dev_cntrs[i].flags & CNTR_32BIT) {
12573                                         memcpy(p, bit_type_32, bit_type_32_sz);
12574                                         p += bit_type_32_sz;
12575                                 }
12576
12577                                 *p++ = '\n';
12578                         }
12579                 } else {
12580                         memcpy(p, dev_cntrs[i].name, strlen(dev_cntrs[i].name));
12581                         p += strlen(dev_cntrs[i].name);
12582
12583                         /* Counter is 32 bits */
12584                         if (dev_cntrs[i].flags & CNTR_32BIT) {
12585                                 memcpy(p, bit_type_32, bit_type_32_sz);
12586                                 p += bit_type_32_sz;
12587                         }
12588
12589                         *p++ = '\n';
12590                 }
12591         }
12592
12593         /*********************/
12594         /* per port counters */
12595         /*********************/
12596
12597         /*
12598          * Go through the counters for the overflows and disable the ones we
12599          * don't need. This varies based on platform so we need to do it
12600          * dynamically here.
12601          */
12602         rcv_ctxts = dd->num_rcv_contexts;
12603         for (i = C_RCV_HDR_OVF_FIRST + rcv_ctxts;
12604              i <= C_RCV_HDR_OVF_LAST; i++) {
12605                 port_cntrs[i].flags |= CNTR_DISABLED;
12606         }
12607
12608         /* size port counter names and determine how many we have */
12609         sz = 0;
12610         dd->nportcntrs = 0;
12611         for (i = 0; i < PORT_CNTR_LAST; i++) {
12612                 if (port_cntrs[i].flags & CNTR_DISABLED) {
12613                         hfi1_dbg_early("\tSkipping %s\n", port_cntrs[i].name);
12614                         continue;
12615                 }
12616
12617                 if (port_cntrs[i].flags & CNTR_VL) {
12618                         port_cntrs[i].offset = dd->nportcntrs;
12619                         for (j = 0; j < C_VL_COUNT; j++) {
12620                                 snprintf(name, C_MAX_NAME, "%s%d",
12621                                          port_cntrs[i].name, vl_from_idx(j));
12622                                 sz += strlen(name);
12623                                 /* Add ",32" for 32-bit counters */
12624                                 if (port_cntrs[i].flags & CNTR_32BIT)
12625                                         sz += bit_type_32_sz;
12626                                 sz++;
12627                                 dd->nportcntrs++;
12628                         }
12629                 } else {
12630                         /* +1 for newline */
12631                         sz += strlen(port_cntrs[i].name) + 1;
12632                         /* Add ",32" for 32-bit counters */
12633                         if (port_cntrs[i].flags & CNTR_32BIT)
12634                                 sz += bit_type_32_sz;
12635                         port_cntrs[i].offset = dd->nportcntrs;
12636                         dd->nportcntrs++;
12637                 }
12638         }
12639
12640         /* allocate space for the counter names */
12641         dd->portcntrnameslen = sz;
12642         dd->portcntrnames = kmalloc(sz, GFP_KERNEL);
12643         if (!dd->portcntrnames)
12644                 goto bail;
12645
12646         /* fill in port cntr names */
12647         for (p = dd->portcntrnames, i = 0; i < PORT_CNTR_LAST; i++) {
12648                 if (port_cntrs[i].flags & CNTR_DISABLED)
12649                         continue;
12650
12651                 if (port_cntrs[i].flags & CNTR_VL) {
12652                         for (j = 0; j < C_VL_COUNT; j++) {
12653                                 snprintf(name, C_MAX_NAME, "%s%d",
12654                                          port_cntrs[i].name, vl_from_idx(j));
12655                                 memcpy(p, name, strlen(name));
12656                                 p += strlen(name);
12657
12658                                 /* Counter is 32 bits */
12659                                 if (port_cntrs[i].flags & CNTR_32BIT) {
12660                                         memcpy(p, bit_type_32, bit_type_32_sz);
12661                                         p += bit_type_32_sz;
12662                                 }
12663
12664                                 *p++ = '\n';
12665                         }
12666                 } else {
12667                         memcpy(p, port_cntrs[i].name,
12668                                strlen(port_cntrs[i].name));
12669                         p += strlen(port_cntrs[i].name);
12670
12671                         /* Counter is 32 bits */
12672                         if (port_cntrs[i].flags & CNTR_32BIT) {
12673                                 memcpy(p, bit_type_32, bit_type_32_sz);
12674                                 p += bit_type_32_sz;
12675                         }
12676
12677                         *p++ = '\n';
12678                 }
12679         }
12680
12681         /* allocate per port storage for counter values */
12682         ppd = (struct hfi1_pportdata *)(dd + 1);
12683         for (i = 0; i < dd->num_pports; i++, ppd++) {
12684                 ppd->cntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12685                 if (!ppd->cntrs)
12686                         goto bail;
12687
12688                 ppd->scntrs = kcalloc(dd->nportcntrs, sizeof(u64), GFP_KERNEL);
12689                 if (!ppd->scntrs)
12690                         goto bail;
12691         }
12692
12693         /* CPU counters need to be allocated and zeroed */
12694         if (init_cpu_counters(dd))
12695                 goto bail;
12696
12697         dd->update_cntr_wq = alloc_ordered_workqueue("hfi1_update_cntr_%d",
12698                                                      WQ_MEM_RECLAIM, dd->unit);
12699         if (!dd->update_cntr_wq)
12700                 goto bail;
12701
12702         INIT_WORK(&dd->update_cntr_work, do_update_synth_timer);
12703
12704         mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
12705         return 0;
12706 bail:
12707         free_cntrs(dd);
12708         return -ENOMEM;
12709 }
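
/*
 * The name buffer built above is a single block of newline-terminated
 * entries; per-VL and per-SDMA counters get a numeric suffix and 32-bit
 * counters carry a ",32" tag.  With made-up counter names it would look like
 *
 *     "CtrA\nCtrBVL0,32\nCtrBVL1,32\n..."
 *
 * which is what hfi1_read_cntrs() and hfi1_read_portcntrs() hand back
 * through @namep.
 */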
12710
12711 static u32 chip_to_opa_lstate(struct hfi1_devdata *dd, u32 chip_lstate)
12712 {
12713         switch (chip_lstate) {
12714         default:
12715                 dd_dev_err(dd,
12716                            "Unknown logical state 0x%x, reporting IB_PORT_DOWN\n",
12717                            chip_lstate);
12718                 /* fall through */
12719         case LSTATE_DOWN:
12720                 return IB_PORT_DOWN;
12721         case LSTATE_INIT:
12722                 return IB_PORT_INIT;
12723         case LSTATE_ARMED:
12724                 return IB_PORT_ARMED;
12725         case LSTATE_ACTIVE:
12726                 return IB_PORT_ACTIVE;
12727         }
12728 }
12729
12730 u32 chip_to_opa_pstate(struct hfi1_devdata *dd, u32 chip_pstate)
12731 {
12732         /* look at the HFI meta-states only */
12733         switch (chip_pstate & 0xf0) {
12734         default:
12735                 dd_dev_err(dd, "Unexpected chip physical state of 0x%x\n",
12736                            chip_pstate);
12737                 /* fall through */
12738         case PLS_DISABLED:
12739                 return IB_PORTPHYSSTATE_DISABLED;
12740         case PLS_OFFLINE:
12741                 return OPA_PORTPHYSSTATE_OFFLINE;
12742         case PLS_POLLING:
12743                 return IB_PORTPHYSSTATE_POLLING;
12744         case PLS_CONFIGPHY:
12745                 return IB_PORTPHYSSTATE_TRAINING;
12746         case PLS_LINKUP:
12747                 return IB_PORTPHYSSTATE_LINKUP;
12748         case PLS_PHYTEST:
12749                 return IB_PORTPHYSSTATE_PHY_TEST;
12750         }
12751 }
12752
12753 /* return the OPA port logical state name */
12754 const char *opa_lstate_name(u32 lstate)
12755 {
12756         static const char * const port_logical_names[] = {
12757                 "PORT_NOP",
12758                 "PORT_DOWN",
12759                 "PORT_INIT",
12760                 "PORT_ARMED",
12761                 "PORT_ACTIVE",
12762                 "PORT_ACTIVE_DEFER",
12763         };
12764         if (lstate < ARRAY_SIZE(port_logical_names))
12765                 return port_logical_names[lstate];
12766         return "unknown";
12767 }
12768
12769 /* return the OPA port physical state name */
12770 const char *opa_pstate_name(u32 pstate)
12771 {
12772         static const char * const port_physical_names[] = {
12773                 "PHYS_NOP",
12774                 "reserved1",
12775                 "PHYS_POLL",
12776                 "PHYS_DISABLED",
12777                 "PHYS_TRAINING",
12778                 "PHYS_LINKUP",
12779                 "PHYS_LINK_ERR_RECOVER",
12780                 "PHYS_PHY_TEST",
12781                 "reserved8",
12782                 "PHYS_OFFLINE",
12783                 "PHYS_GANGED",
12784                 "PHYS_TEST",
12785         };
12786         if (pstate < ARRAY_SIZE(port_physical_names))
12787                 return port_physical_names[pstate];
12788         return "unknown";
12789 }
12790
12791 /**
12792  * update_statusp - Update userspace status flag
12793  * @ppd: Port data structure
12794  * @state: port state information
12795  *
12796  * Actual port status is determined by the host_link_state value
12797  * in the ppd.
12798  *
12799  * host_link_state MUST be updated before updating the user space
12800  * statusp.
12801  */
12802 static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
12803 {
12804         /*
12805          * Set port status flags in the page mapped into userspace
12806          * memory. Do it here to ensure a reliable state - this is
12807          * the only function called by all state handling code.
12808          * Always set the flags because the cache value might
12809          * have been changed explicitly outside of this
12810          * function.
12811          */
12812         if (ppd->statusp) {
12813                 switch (state) {
12814                 case IB_PORT_DOWN:
12815                 case IB_PORT_INIT:
12816                         *ppd->statusp &= ~(HFI1_STATUS_IB_CONF |
12817                                            HFI1_STATUS_IB_READY);
12818                         break;
12819                 case IB_PORT_ARMED:
12820                         *ppd->statusp |= HFI1_STATUS_IB_CONF;
12821                         break;
12822                 case IB_PORT_ACTIVE:
12823                         *ppd->statusp |= HFI1_STATUS_IB_READY;
12824                         break;
12825                 }
12826         }
12827         dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
12828                     opa_lstate_name(state), state);
12829 }
12830
12831 /**
12832  * wait_logical_linkstate - wait for an IB link state change to occur
12833  * @ppd: port device
12834  * @state: the state to wait for
12835  * @msecs: the number of milliseconds to wait
12836  *
12837  * Wait up to msecs milliseconds for IB link state change to occur.
12838  * For now, take the easy polling route.
12839  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12840  */
12841 static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12842                                   int msecs)
12843 {
12844         unsigned long timeout;
12845         u32 new_state;
12846
12847         timeout = jiffies + msecs_to_jiffies(msecs);
12848         while (1) {
12849                 new_state = chip_to_opa_lstate(ppd->dd,
12850                                                read_logical_state(ppd->dd));
12851                 if (new_state == state)
12852                         break;
12853                 if (time_after(jiffies, timeout)) {
12854                         dd_dev_err(ppd->dd,
12855                                    "timeout waiting for link state 0x%x\n",
12856                                    state);
12857                         return -ETIMEDOUT;
12858                 }
12859                 msleep(20);
12860         }
12861
12862         return 0;
12863 }
12864
12865 static void log_state_transition(struct hfi1_pportdata *ppd, u32 state)
12866 {
12867         u32 ib_pstate = chip_to_opa_pstate(ppd->dd, state);
12868
12869         dd_dev_info(ppd->dd,
12870                     "physical state changed to %s (0x%x), phy 0x%x\n",
12871                     opa_pstate_name(ib_pstate), ib_pstate, state);
12872 }
12873
12874 /*
12875  * Read the physical hardware link state and check if it matches the host
12876  * driver's anticipated state.
12877  */
12878 static void log_physical_state(struct hfi1_pportdata *ppd, u32 state)
12879 {
12880         u32 read_state = read_physical_state(ppd->dd);
12881
12882         if (read_state == state) {
12883                 log_state_transition(ppd, state);
12884         } else {
12885                 dd_dev_err(ppd->dd,
12886                            "anticipated phy link state 0x%x, read 0x%x\n",
12887                            state, read_state);
12888         }
12889 }
12890
12891 /*
12892  * wait_physical_linkstate - wait for a physical link state change to occur
12893  * @ppd: port device
12894  * @state: the state to wait for
12895  * @msecs: the number of milliseconds to wait
12896  *
12897  * Wait up to msecs milliseconds for physical link state change to occur.
12898  * Returns 0 if state reached, otherwise -ETIMEDOUT.
12899  */
12900 static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12901                                    int msecs)
12902 {
12903         u32 read_state;
12904         unsigned long timeout;
12905
12906         timeout = jiffies + msecs_to_jiffies(msecs);
12907         while (1) {
12908                 read_state = read_physical_state(ppd->dd);
12909                 if (read_state == state)
12910                         break;
12911                 if (time_after(jiffies, timeout)) {
12912                         dd_dev_err(ppd->dd,
12913                                    "timeout waiting for phy link state 0x%x\n",
12914                                    state);
12915                         return -ETIMEDOUT;
12916                 }
12917                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12918         }
12919
12920         log_state_transition(ppd, state);
12921         return 0;
12922 }
12923
12924 /*
12925  * wait_phys_link_offline_substates - wait for any offline substate
12926  * @ppd: port device
12927  * @msecs: the number of milliseconds to wait
12928  *
12929  * Wait up to msecs milliseconds for any offline physical link
12930  * state change to occur.
12931  * Returns the read state once an offline substate is reached, else -ETIMEDOUT.
12932  */
12933 static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12934                                             int msecs)
12935 {
12936         u32 read_state;
12937         unsigned long timeout;
12938
12939         timeout = jiffies + msecs_to_jiffies(msecs);
12940         while (1) {
12941                 read_state = read_physical_state(ppd->dd);
12942                 if ((read_state & 0xF0) == PLS_OFFLINE)
12943                         break;
12944                 if (time_after(jiffies, timeout)) {
12945                         dd_dev_err(ppd->dd,
12946                                    "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12947                                    read_state, msecs);
12948                         return -ETIMEDOUT;
12949                 }
12950                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12951         }
12952
12953         log_state_transition(ppd, read_state);
12954         return read_state;
12955 }
12956
12957 /*
12958  * wait_phys_link_out_of_offline - wait for any out of offline state
12959  * @ppd: port device
12960  * @msecs: the number of milliseconds to wait
12961  *
12962  * Wait up to msecs milliseconds for any out of offline physical link
12963  * state change to occur.
12964  * Returns the read state once out of offline, otherwise -ETIMEDOUT.
12965  */
12966 static int wait_phys_link_out_of_offline(struct hfi1_pportdata *ppd,
12967                                          int msecs)
12968 {
12969         u32 read_state;
12970         unsigned long timeout;
12971
12972         timeout = jiffies + msecs_to_jiffies(msecs);
12973         while (1) {
12974                 read_state = read_physical_state(ppd->dd);
12975                 if ((read_state & 0xF0) != PLS_OFFLINE)
12976                         break;
12977                 if (time_after(jiffies, timeout)) {
12978                         dd_dev_err(ppd->dd,
12979                                    "timeout waiting for phy link out of offline. Read state 0x%x, %dms\n",
12980                                    read_state, msecs);
12981                         return -ETIMEDOUT;
12982                 }
12983                 usleep_range(1950, 2050); /* sleep 2ms-ish */
12984         }
12985
12986         log_state_transition(ppd, read_state);
12987         return read_state;
12988 }
12989
12990 #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12991 (r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12992
12993 #define SET_STATIC_RATE_CONTROL_SMASK(r) \
12994 (r |= SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12995
12996 void hfi1_init_ctxt(struct send_context *sc)
12997 {
12998         if (sc) {
12999                 struct hfi1_devdata *dd = sc->dd;
13000                 u64 reg;
13001                 u8 set = (sc->type == SC_USER ?
13002                           HFI1_CAP_IS_USET(STATIC_RATE_CTRL) :
13003                           HFI1_CAP_IS_KSET(STATIC_RATE_CTRL));
13004                 reg = read_kctxt_csr(dd, sc->hw_context,
13005                                      SEND_CTXT_CHECK_ENABLE);
13006                 if (set)
13007                         CLEAR_STATIC_RATE_CONTROL_SMASK(reg);
13008                 else
13009                         SET_STATIC_RATE_CONTROL_SMASK(reg);
13010                 write_kctxt_csr(dd, sc->hw_context,
13011                                 SEND_CTXT_CHECK_ENABLE, reg);
13012         }
13013 }
13014
13015 int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
13016 {
13017         int ret = 0;
13018         u64 reg;
13019
13020         if (dd->icode != ICODE_RTL_SILICON) {
13021                 if (HFI1_CAP_IS_KSET(PRINT_UNIMPL))
13022                         dd_dev_info(dd, "%s: tempsense not supported by HW\n",
13023                                     __func__);
13024                 return -EINVAL;
13025         }
13026         reg = read_csr(dd, ASIC_STS_THERM);
13027         temp->curr = ((reg >> ASIC_STS_THERM_CURR_TEMP_SHIFT) &
13028                       ASIC_STS_THERM_CURR_TEMP_MASK);
13029         temp->lo_lim = ((reg >> ASIC_STS_THERM_LO_TEMP_SHIFT) &
13030                         ASIC_STS_THERM_LO_TEMP_MASK);
13031         temp->hi_lim = ((reg >> ASIC_STS_THERM_HI_TEMP_SHIFT) &
13032                         ASIC_STS_THERM_HI_TEMP_MASK);
13033         temp->crit_lim = ((reg >> ASIC_STS_THERM_CRIT_TEMP_SHIFT) &
13034                           ASIC_STS_THERM_CRIT_TEMP_MASK);
13035         /* triggers is a 3-bit value - 1 bit per trigger. */
13036         temp->triggers = (u8)((reg >> ASIC_STS_THERM_LOW_SHIFT) & 0x7);
13037
13038         return ret;
13039 }
13040
13041 /* ========================================================================= */
13042
13043 /**
13044  * read_mod_write() - Calculate the IRQ register index and set/clear the bits
13045  * @dd: valid devdata
13046  * @src: IRQ source to determine register index from
13047  * @bits: the bits to set or clear
13048  * @set: true == set the bits, false == clear the bits
13049  *
13050  */
13051 static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
13052                            bool set)
13053 {
13054         u64 reg;
13055         u16 idx = src / BITS_PER_REGISTER;
13056
13057         spin_lock(&dd->irq_src_lock);
13058         reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
13059         if (set)
13060                 reg |= bits;
13061         else
13062                 reg &= ~bits;
13063         write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
13064         spin_unlock(&dd->irq_src_lock);
13065 }
13066
13067 /**
13068  * set_intr_bits() - Enable/disable a range of (one or more) IRQ sources
13069  * @dd: valid devdata
13070  * @first: first IRQ source to set/clear
13071  * @last: last IRQ source (inclusive) to set/clear
13072  * @set: true == set the bits, false == clear the bits
13073  *
13074  * If first == last, set the exact source.
13075  */
13076 int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
13077 {
13078         u64 bits = 0;
13079         u64 bit;
13080         u16 src;
13081
13082         if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
13083                 return -EINVAL;
13084
13085         if (last < first)
13086                 return -ERANGE;
13087
13088         for (src = first; src <= last; src++) {
13089                 bit = src % BITS_PER_REGISTER;
13090                 /* wrapped to next register? */
13091                 if (!bit && bits) {
13092                         read_mod_write(dd, src - 1, bits, set);
13093                         bits = 0;
13094                 }
13095                 bits |= BIT_ULL(bit);
13096         }
13097         read_mod_write(dd, last, bits, set);
13098
13099         return 0;
13100 }
13101
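/*
 * Illustrative sketch of how set_intr_bits() above chunks its work,
 * assuming 64 interrupt sources per CCE_INT_MASK register
 * (BITS_PER_REGISTER).  A request that spans two registers is split into
 * one read_mod_write() call per register:
 *
 *	set_intr_bits(dd, 62, 65, true);
 *	  read_mod_write(dd, 63, BIT_ULL(62) | BIT_ULL(63), true); - register 0
 *	  read_mod_write(dd, 65, BIT_ULL(0)  | BIT_ULL(1),  true); - register 1
 */
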
13102 /*
13103  * Clear all interrupt sources on the chip.
13104  */
13105 void clear_all_interrupts(struct hfi1_devdata *dd)
13106 {
13107         int i;
13108
13109         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13110                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~(u64)0);
13111
13112         write_csr(dd, CCE_ERR_CLEAR, ~(u64)0);
13113         write_csr(dd, MISC_ERR_CLEAR, ~(u64)0);
13114         write_csr(dd, RCV_ERR_CLEAR, ~(u64)0);
13115         write_csr(dd, SEND_ERR_CLEAR, ~(u64)0);
13116         write_csr(dd, SEND_PIO_ERR_CLEAR, ~(u64)0);
13117         write_csr(dd, SEND_DMA_ERR_CLEAR, ~(u64)0);
13118         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~(u64)0);
13119         for (i = 0; i < chip_send_contexts(dd); i++)
13120                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~(u64)0);
13121         for (i = 0; i < chip_sdma_engines(dd); i++)
13122                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~(u64)0);
13123
13124         write_csr(dd, DCC_ERR_FLG_CLR, ~(u64)0);
13125         write_csr(dd, DC_LCB_ERR_CLR, ~(u64)0);
13126         write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
13127 }
13128
13129 /*
13130  * Remap the interrupt source from the general handler to the given MSI-X
13131  * interrupt.
13132  */
13133 void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
13134 {
13135         u64 reg;
13136         int m, n;
13137
13138         /* clear from the handled mask of the general interrupt */
13139         m = isrc / 64;
13140         n = isrc % 64;
13141         if (likely(m < CCE_NUM_INT_CSRS)) {
13142                 dd->gi_mask[m] &= ~((u64)1 << n);
13143         } else {
13144                 dd_dev_err(dd, "remap interrupt err\n");
13145                 return;
13146         }
13147
13148         /* direct the chip source to the given MSI-X interrupt */
13149         m = isrc / 8;
13150         n = isrc % 8;
13151         reg = read_csr(dd, CCE_INT_MAP + (8 * m));
13152         reg &= ~((u64)0xff << (8 * n));
13153         reg |= ((u64)msix_intr & 0xff) << (8 * n);
13154         write_csr(dd, CCE_INT_MAP + (8 * m), reg);
13155 }
13156
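/*
 * Worked example of the index arithmetic in remap_intr() above,
 * with an illustrative value of isrc = 21: the general-handler mask
 * clears bit 21 of gi_mask[0] (21 / 64 = 0, 21 % 64 = 21), then byte 5
 * of CCE_INT_MAP register 2 (21 / 8 = 2, 21 % 8 = 5) is rewritten with
 * the MSI-X vector, since each map register holds eight 8-bit entries.
 */
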
13157 void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
13158 {
13159         /*
13160          * SDMA engine interrupt sources grouped by type, rather than
13161          * SDMA engine interrupt sources are grouped by type, rather than
13162          * by engine.  Per-engine interrupts are as follows:
13163          *      SDMAProgress
13164          *      SDMAIdle
13165          */
13166         remap_intr(dd, IS_SDMA_START + engine, msix_intr);
13167         remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
13168         remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
13169 }
13170
13171 /*
13172  * Set the general handler to accept all interrupts, remap all
13173  * chip interrupts back to MSI-X 0.
13174  */
13175 void reset_interrupts(struct hfi1_devdata *dd)
13176 {
13177         int i;
13178
13179         /* all interrupts handled by the general handler */
13180         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13181                 dd->gi_mask[i] = ~(u64)0;
13182
13183         /* all chip interrupts map to MSI-X 0 */
13184         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13185                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13186 }
13187
13188 /**
13189  * set_up_interrupts() - Initialize the IRQ resources and state
13190  * @dd: valid devdata
13191  *
13192  */
13193 static int set_up_interrupts(struct hfi1_devdata *dd)
13194 {
13195         int ret;
13196
13197         /* mask all interrupts */
13198         set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
13199
13200         /* clear all pending interrupts */
13201         clear_all_interrupts(dd);
13202
13203         /* reset general handler mask, chip MSI-X mappings */
13204         reset_interrupts(dd);
13205
13206         /* ask for MSI-X interrupts */
13207         ret = msix_initialize(dd);
13208         if (ret)
13209                 return ret;
13210
13211         ret = msix_request_irqs(dd);
13212         if (ret)
13213                 msix_clean_up_interrupts(dd);
13214
13215         return ret;
13216 }
13217
13218 /*
13219  * Set up context values in dd.  Sets:
13220  *
13221  *      num_rcv_contexts - number of contexts being used
13222  *      n_krcv_queues - number of kernel contexts
13223  *      first_dyn_alloc_ctxt - first dynamically allocated context
13224  *                             in array of contexts
13225  *      freectxts  - number of free user contexts
13226  *      num_send_contexts - number of PIO send contexts being used
13227  *      num_vnic_contexts - number of contexts reserved for VNIC
13228  */
13229 static int set_up_context_variables(struct hfi1_devdata *dd)
13230 {
13231         unsigned long num_kernel_contexts;
13232         u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
13233         int total_contexts;
13234         int ret;
13235         unsigned ngroups;
13236         int rmt_count;
13237         int user_rmt_reduced;
13238         u32 n_usr_ctxts;
13239         u32 send_contexts = chip_send_contexts(dd);
13240         u32 rcv_contexts = chip_rcv_contexts(dd);
13241
13242         /*
13243          * Kernel receive contexts:
13244          * - Context 0 - control context (VL15/multicast/error)
13245          * - Context 1 - first kernel context
13246          * - Context 2 - second kernel context
13247          * ...
13248          */
13249         if (n_krcvqs)
13250                 /*
13251                  * n_krcvqs is the sum of module parameter kernel receive
13252                  * contexts, krcvqs[].  It does not include the control
13253                  * context, so add that.
13254                  */
13255                 num_kernel_contexts = n_krcvqs + 1;
13256         else
13257                 num_kernel_contexts = DEFAULT_KRCVQS + 1;
13258         /*
13259          * Every kernel receive context needs an ACK send context.
13260          * One send context is allocated for each VL{0-7} and VL15.
13261          */
13262         if (num_kernel_contexts > (send_contexts - num_vls - 1)) {
13263                 dd_dev_err(dd,
13264                            "Reducing # kernel rcv contexts to: %d, from %lu\n",
13265                            send_contexts - num_vls - 1,
13266                            num_kernel_contexts);
13267                 num_kernel_contexts = send_contexts - num_vls - 1;
13268         }
13269
13270         /* Accommodate VNIC contexts if possible */
13271         if ((num_kernel_contexts + num_vnic_contexts) > rcv_contexts) {
13272                 dd_dev_err(dd, "No receive contexts available for VNIC\n");
13273                 num_vnic_contexts = 0;
13274         }
13275         total_contexts = num_kernel_contexts + num_vnic_contexts;
13276
13277         /*
13278          * User contexts:
13279          *      - default to 1 user context per real (non-HT) CPU core if
13280          *        num_user_contexts is negative
13281          */
13282         if (num_user_contexts < 0)
13283                 n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
13284         else
13285                 n_usr_ctxts = num_user_contexts;
13286         /*
13287          * Adjust the counts given a global max.
13288          */
13289         if (total_contexts + n_usr_ctxts > rcv_contexts) {
13290                 dd_dev_err(dd,
13291                            "Reducing # user receive contexts to: %d, from %u\n",
13292                            rcv_contexts - total_contexts,
13293                            n_usr_ctxts);
13294                 /* recalculate */
13295                 n_usr_ctxts = rcv_contexts - total_contexts;
13296         }
13297
13298         /*
13299          * The RMT entries are currently allocated as shown below:
13300          * 1. QOS (0 to 128 entries);
13301          * 2. FECN (num_kernel_context - 1 + num_user_contexts +
13302          *    num_vnic_contexts);
13303          * 3. VNIC (num_vnic_contexts).
13304          * It should be noted that FECN oversubscribes num_vnic_contexts
13305          * entries of RMT because both VNIC and PSM could allocate any receive
13306          * context between dd->first_dyn_alloc_ctxt and dd->num_rcv_contexts,
13307          * and PSM FECN must reserve an RMT entry for each possible PSM receive
13308          * context.
13309          */
13310         rmt_count = qos_rmt_entries(dd, NULL, NULL) + (num_vnic_contexts * 2);
13311         if (HFI1_CAP_IS_KSET(TID_RDMA))
13312                 rmt_count += num_kernel_contexts - 1;
13313         if (rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
13314                 user_rmt_reduced = NUM_MAP_ENTRIES - rmt_count;
13315                 dd_dev_err(dd,
13316                            "RMT size is reducing the number of user receive contexts from %u to %d\n",
13317                            n_usr_ctxts,
13318                            user_rmt_reduced);
13319                 /* recalculate */
13320                 n_usr_ctxts = user_rmt_reduced;
13321         }
13322
13323         total_contexts += n_usr_ctxts;
13324
13325         /* the first N are kernel contexts, the rest are user/vnic contexts */
13326         dd->num_rcv_contexts = total_contexts;
13327         dd->n_krcv_queues = num_kernel_contexts;
13328         dd->first_dyn_alloc_ctxt = num_kernel_contexts;
13329         dd->num_vnic_contexts = num_vnic_contexts;
13330         dd->num_user_contexts = n_usr_ctxts;
13331         dd->freectxts = n_usr_ctxts;
13332         dd_dev_info(dd,
13333                     "rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
13334                     rcv_contexts,
13335                     (int)dd->num_rcv_contexts,
13336                     (int)dd->n_krcv_queues,
13337                     dd->num_vnic_contexts,
13338                     dd->num_user_contexts);
13339
13340         /*
13341          * Receive array allocation:
13342          *   All RcvArray entries are divided into groups of 8. This
13343          *   is required by the hardware and will speed up writes to
13344          *   consecutive entries by using write-combining of the entire
13345          *   cacheline.
13346          *
13347          *   The number of groups is evenly divided among all contexts.
13348          *   Any left over groups will be given to the first N user
13349          *   contexts.
13350          */
13351         dd->rcv_entries.group_size = RCV_INCREMENT;
13352         ngroups = chip_rcv_array_count(dd) / dd->rcv_entries.group_size;
13353         dd->rcv_entries.ngroups = ngroups / dd->num_rcv_contexts;
13354         dd->rcv_entries.nctxt_extra = ngroups -
13355                 (dd->num_rcv_contexts * dd->rcv_entries.ngroups);
13356         dd_dev_info(dd, "RcvArray groups %u, ctxts extra %u\n",
13357                     dd->rcv_entries.ngroups,
13358                     dd->rcv_entries.nctxt_extra);
13359         if (dd->rcv_entries.ngroups * dd->rcv_entries.group_size >
13360             MAX_EAGER_ENTRIES * 2) {
13361                 dd->rcv_entries.ngroups = (MAX_EAGER_ENTRIES * 2) /
13362                         dd->rcv_entries.group_size;
13363                 dd_dev_info(dd,
13364                             "RcvArray group count too high, change to %u\n",
13365                             dd->rcv_entries.ngroups);
13366                 dd->rcv_entries.nctxt_extra = 0;
13367         }
13368         /*
13369          * PIO send contexts
13370          */
13371         ret = init_sc_pools_and_sizes(dd);
13372         if (ret >= 0) { /* success */
13373                 dd->num_send_contexts = ret;
13374                 dd_dev_info(
13375                         dd,
13376                         "send contexts: chip %d, used %d (kernel %d, ack %d, user %d, vl15 %d)\n",
13377                         send_contexts,
13378                         dd->num_send_contexts,
13379                         dd->sc_sizes[SC_KERNEL].count,
13380                         dd->sc_sizes[SC_ACK].count,
13381                         dd->sc_sizes[SC_USER].count,
13382                         dd->sc_sizes[SC_VL15].count);
13383                 ret = 0;        /* success */
13384         }
13385
13386         return ret;
13387 }
13388
13389 /*
13390  * Set the device/port partition key table. The MAD code
13391  * will ensure that, at least, the partial management
13392  * partition key is present in the table.
13393  */
13394 static void set_partition_keys(struct hfi1_pportdata *ppd)
13395 {
13396         struct hfi1_devdata *dd = ppd->dd;
13397         u64 reg = 0;
13398         int i;
13399
13400         dd_dev_info(dd, "Setting partition keys\n");
13401         for (i = 0; i < hfi1_get_npkeys(dd); i++) {
13402                 reg |= (ppd->pkeys[i] &
13403                         RCV_PARTITION_KEY_PARTITION_KEY_A_MASK) <<
13404                         ((i % 4) *
13405                          RCV_PARTITION_KEY_PARTITION_KEY_B_SHIFT);
13406                 /* Each register holds 4 PKey values. */
13407                 if ((i % 4) == 3) {
13408                         write_csr(dd, RCV_PARTITION_KEY +
13409                                   ((i - 3) * 2), reg);
13410                         reg = 0;
13411                 }
13412         }
13413
13414         /* Always enable HW pkeys check when pkeys table is set */
13415         add_rcvctrl(dd, RCV_CTRL_RCV_PARTITION_KEY_ENABLE_SMASK);
13416 }
13417
13418 /*
13419  * These CSRs and memories are uninitialized on reset and must be
13420  * written before reading to set the ECC/parity bits.
13421  *
13422  * NOTE: All user context CSRs that are not mmaped write-only
13423  * (e.g. the TID flows) must be initialized even if the driver never
13424  * reads them.
13425  */
13426 static void write_uninitialized_csrs_and_memories(struct hfi1_devdata *dd)
13427 {
13428         int i, j;
13429
13430         /* CceIntMap */
13431         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13432                 write_csr(dd, CCE_INT_MAP + (8 * i), 0);
13433
13434         /* SendCtxtCreditReturnAddr */
13435         for (i = 0; i < chip_send_contexts(dd); i++)
13436                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13437
13438         /* PIO Send buffers */
13439         /* SDMA Send buffers */
13440         /*
13441          * These are not normally read, and (presently) have no method
13442          * to be read, so are not pre-initialized
13443          */
13444
13445         /* RcvHdrAddr */
13446         /* RcvHdrTailAddr */
13447         /* RcvTidFlowTable */
13448         for (i = 0; i < chip_rcv_contexts(dd); i++) {
13449                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13450                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13451                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++)
13452                         write_uctxt_csr(dd, i, RCV_TID_FLOW_TABLE + (8 * j), 0);
13453         }
13454
13455         /* RcvArray */
13456         for (i = 0; i < chip_rcv_array_count(dd); i++)
13457                 hfi1_put_tid(dd, i, PT_INVALID_FLUSH, 0, 0);
13458
13459         /* RcvQPMapTable */
13460         for (i = 0; i < 32; i++)
13461                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13462 }
13463
13464 /*
13465  * Use the ctrl_bits in CceCtrl to clear the status_bits in CceStatus.
13466  */
13467 static void clear_cce_status(struct hfi1_devdata *dd, u64 status_bits,
13468                              u64 ctrl_bits)
13469 {
13470         unsigned long timeout;
13471         u64 reg;
13472
13473         /* is the condition present? */
13474         reg = read_csr(dd, CCE_STATUS);
13475         if ((reg & status_bits) == 0)
13476                 return;
13477
13478         /* clear the condition */
13479         write_csr(dd, CCE_CTRL, ctrl_bits);
13480
13481         /* wait for the condition to clear */
13482         timeout = jiffies + msecs_to_jiffies(CCE_STATUS_TIMEOUT);
13483         while (1) {
13484                 reg = read_csr(dd, CCE_STATUS);
13485                 if ((reg & status_bits) == 0)
13486                         return;
13487                 if (time_after(jiffies, timeout)) {
13488                         dd_dev_err(dd,
13489                                    "Timeout waiting for CceStatus to clear bits 0x%llx, remaining 0x%llx\n",
13490                                    status_bits, reg & status_bits);
13491                         return;
13492                 }
13493                 udelay(1);
13494         }
13495 }
13496
13497 /* set CCE CSRs to chip reset defaults */
13498 static void reset_cce_csrs(struct hfi1_devdata *dd)
13499 {
13500         int i;
13501
13502         /* CCE_REVISION read-only */
13503         /* CCE_REVISION2 read-only */
13504         /* CCE_CTRL - bits clear automatically */
13505         /* CCE_STATUS read-only, use CceCtrl to clear */
13506         clear_cce_status(dd, ALL_FROZE, CCE_CTRL_SPC_UNFREEZE_SMASK);
13507         clear_cce_status(dd, ALL_TXE_PAUSE, CCE_CTRL_TXE_RESUME_SMASK);
13508         clear_cce_status(dd, ALL_RXE_PAUSE, CCE_CTRL_RXE_RESUME_SMASK);
13509         for (i = 0; i < CCE_NUM_SCRATCH; i++)
13510                 write_csr(dd, CCE_SCRATCH + (8 * i), 0);
13511         /* CCE_ERR_STATUS read-only */
13512         write_csr(dd, CCE_ERR_MASK, 0);
13513         write_csr(dd, CCE_ERR_CLEAR, ~0ull);
13514         /* CCE_ERR_FORCE leave alone */
13515         for (i = 0; i < CCE_NUM_32_BIT_COUNTERS; i++)
13516                 write_csr(dd, CCE_COUNTER_ARRAY32 + (8 * i), 0);
13517         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_RESETCSR);
13518         /* CCE_PCIE_CTRL leave alone */
13519         for (i = 0; i < CCE_NUM_MSIX_VECTORS; i++) {
13520                 write_csr(dd, CCE_MSIX_TABLE_LOWER + (8 * i), 0);
13521                 write_csr(dd, CCE_MSIX_TABLE_UPPER + (8 * i),
13522                           CCE_MSIX_TABLE_UPPER_RESETCSR);
13523         }
13524         for (i = 0; i < CCE_NUM_MSIX_PBAS; i++) {
13525                 /* CCE_MSIX_PBA read-only */
13526                 write_csr(dd, CCE_MSIX_INT_GRANTED, ~0ull);
13527                 write_csr(dd, CCE_MSIX_VEC_CLR_WITHOUT_INT, ~0ull);
13528         }
13529         for (i = 0; i < CCE_NUM_INT_MAP_CSRS; i++)
13530                 write_csr(dd, CCE_INT_MAP, 0);
13531         for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
13532                 /* CCE_INT_STATUS read-only */
13533                 write_csr(dd, CCE_INT_MASK + (8 * i), 0);
13534                 write_csr(dd, CCE_INT_CLEAR + (8 * i), ~0ull);
13535                 /* CCE_INT_FORCE leave alone */
13536                 /* CCE_INT_BLOCKED read-only */
13537         }
13538         for (i = 0; i < CCE_NUM_32_BIT_INT_COUNTERS; i++)
13539                 write_csr(dd, CCE_INT_COUNTER_ARRAY32 + (8 * i), 0);
13540 }
13541
13542 /* set MISC CSRs to chip reset defaults */
13543 static void reset_misc_csrs(struct hfi1_devdata *dd)
13544 {
13545         int i;
13546
13547         for (i = 0; i < 32; i++) {
13548                 write_csr(dd, MISC_CFG_RSA_R2 + (8 * i), 0);
13549                 write_csr(dd, MISC_CFG_RSA_SIGNATURE + (8 * i), 0);
13550                 write_csr(dd, MISC_CFG_RSA_MODULUS + (8 * i), 0);
13551         }
13552         /*
13553          * MISC_CFG_SHA_PRELOAD leave alone - always reads 0 and can
13554          * only be written in 128-byte chunks
13555          */
13556         /* init RSA engine to clear lingering errors */
13557         write_csr(dd, MISC_CFG_RSA_CMD, 1);
13558         write_csr(dd, MISC_CFG_RSA_MU, 0);
13559         write_csr(dd, MISC_CFG_FW_CTRL, 0);
13560         /* MISC_STS_8051_DIGEST read-only */
13561         /* MISC_STS_SBM_DIGEST read-only */
13562         /* MISC_STS_PCIE_DIGEST read-only */
13563         /* MISC_STS_FAB_DIGEST read-only */
13564         /* MISC_ERR_STATUS read-only */
13565         write_csr(dd, MISC_ERR_MASK, 0);
13566         write_csr(dd, MISC_ERR_CLEAR, ~0ull);
13567         /* MISC_ERR_FORCE leave alone */
13568 }
13569
13570 /* set TXE CSRs to chip reset defaults */
13571 static void reset_txe_csrs(struct hfi1_devdata *dd)
13572 {
13573         int i;
13574
13575         /*
13576          * TXE Kernel CSRs
13577          */
13578         write_csr(dd, SEND_CTRL, 0);
13579         __cm_reset(dd, 0);      /* reset CM internal state */
13580         /* SEND_CONTEXTS read-only */
13581         /* SEND_DMA_ENGINES read-only */
13582         /* SEND_PIO_MEM_SIZE read-only */
13583         /* SEND_DMA_MEM_SIZE read-only */
13584         write_csr(dd, SEND_HIGH_PRIORITY_LIMIT, 0);
13585         pio_reset_all(dd);      /* SEND_PIO_INIT_CTXT */
13586         /* SEND_PIO_ERR_STATUS read-only */
13587         write_csr(dd, SEND_PIO_ERR_MASK, 0);
13588         write_csr(dd, SEND_PIO_ERR_CLEAR, ~0ull);
13589         /* SEND_PIO_ERR_FORCE leave alone */
13590         /* SEND_DMA_ERR_STATUS read-only */
13591         write_csr(dd, SEND_DMA_ERR_MASK, 0);
13592         write_csr(dd, SEND_DMA_ERR_CLEAR, ~0ull);
13593         /* SEND_DMA_ERR_FORCE leave alone */
13594         /* SEND_EGRESS_ERR_STATUS read-only */
13595         write_csr(dd, SEND_EGRESS_ERR_MASK, 0);
13596         write_csr(dd, SEND_EGRESS_ERR_CLEAR, ~0ull);
13597         /* SEND_EGRESS_ERR_FORCE leave alone */
13598         write_csr(dd, SEND_BTH_QP, 0);
13599         write_csr(dd, SEND_STATIC_RATE_CONTROL, 0);
13600         write_csr(dd, SEND_SC2VLT0, 0);
13601         write_csr(dd, SEND_SC2VLT1, 0);
13602         write_csr(dd, SEND_SC2VLT2, 0);
13603         write_csr(dd, SEND_SC2VLT3, 0);
13604         write_csr(dd, SEND_LEN_CHECK0, 0);
13605         write_csr(dd, SEND_LEN_CHECK1, 0);
13606         /* SEND_ERR_STATUS read-only */
13607         write_csr(dd, SEND_ERR_MASK, 0);
13608         write_csr(dd, SEND_ERR_CLEAR, ~0ull);
13609         /* SEND_ERR_FORCE read-only */
13610         for (i = 0; i < VL_ARB_LOW_PRIO_TABLE_SIZE; i++)
13611                 write_csr(dd, SEND_LOW_PRIORITY_LIST + (8 * i), 0);
13612         for (i = 0; i < VL_ARB_HIGH_PRIO_TABLE_SIZE; i++)
13613                 write_csr(dd, SEND_HIGH_PRIORITY_LIST + (8 * i), 0);
13614         for (i = 0; i < chip_send_contexts(dd) / NUM_CONTEXTS_PER_SET; i++)
13615                 write_csr(dd, SEND_CONTEXT_SET_CTRL + (8 * i), 0);
13616         for (i = 0; i < TXE_NUM_32_BIT_COUNTER; i++)
13617                 write_csr(dd, SEND_COUNTER_ARRAY32 + (8 * i), 0);
13618         for (i = 0; i < TXE_NUM_64_BIT_COUNTER; i++)
13619                 write_csr(dd, SEND_COUNTER_ARRAY64 + (8 * i), 0);
13620         write_csr(dd, SEND_CM_CTRL, SEND_CM_CTRL_RESETCSR);
13621         write_csr(dd, SEND_CM_GLOBAL_CREDIT, SEND_CM_GLOBAL_CREDIT_RESETCSR);
13622         /* SEND_CM_CREDIT_USED_STATUS read-only */
13623         write_csr(dd, SEND_CM_TIMER_CTRL, 0);
13624         write_csr(dd, SEND_CM_LOCAL_AU_TABLE0_TO3, 0);
13625         write_csr(dd, SEND_CM_LOCAL_AU_TABLE4_TO7, 0);
13626         write_csr(dd, SEND_CM_REMOTE_AU_TABLE0_TO3, 0);
13627         write_csr(dd, SEND_CM_REMOTE_AU_TABLE4_TO7, 0);
13628         for (i = 0; i < TXE_NUM_DATA_VL; i++)
13629                 write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
13630         write_csr(dd, SEND_CM_CREDIT_VL15, 0);
13631         /* SEND_CM_CREDIT_USED_VL read-only */
13632         /* SEND_CM_CREDIT_USED_VL15 read-only */
13633         /* SEND_EGRESS_CTXT_STATUS read-only */
13634         /* SEND_EGRESS_SEND_DMA_STATUS read-only */
13635         write_csr(dd, SEND_EGRESS_ERR_INFO, ~0ull);
13636         /* SEND_EGRESS_ERR_INFO read-only */
13637         /* SEND_EGRESS_ERR_SOURCE read-only */
13638
13639         /*
13640          * TXE Per-Context CSRs
13641          */
13642         for (i = 0; i < chip_send_contexts(dd); i++) {
13643                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13644                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_CTRL, 0);
13645                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_RETURN_ADDR, 0);
13646                 write_kctxt_csr(dd, i, SEND_CTXT_CREDIT_FORCE, 0);
13647                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, 0);
13648                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_CLEAR, ~0ull);
13649                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_ENABLE, 0);
13650                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_VL, 0);
13651                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_JOB_KEY, 0);
13652                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_PARTITION_KEY, 0);
13653                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_SLID, 0);
13654                 write_kctxt_csr(dd, i, SEND_CTXT_CHECK_OPCODE, 0);
13655         }
13656
13657         /*
13658          * TXE Per-SDMA CSRs
13659          */
13660         for (i = 0; i < chip_sdma_engines(dd); i++) {
13661                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13662                 /* SEND_DMA_STATUS read-only */
13663                 write_kctxt_csr(dd, i, SEND_DMA_BASE_ADDR, 0);
13664                 write_kctxt_csr(dd, i, SEND_DMA_LEN_GEN, 0);
13665                 write_kctxt_csr(dd, i, SEND_DMA_TAIL, 0);
13666                 /* SEND_DMA_HEAD read-only */
13667                 write_kctxt_csr(dd, i, SEND_DMA_HEAD_ADDR, 0);
13668                 write_kctxt_csr(dd, i, SEND_DMA_PRIORITY_THLD, 0);
13669                 /* SEND_DMA_IDLE_CNT read-only */
13670                 write_kctxt_csr(dd, i, SEND_DMA_RELOAD_CNT, 0);
13671                 write_kctxt_csr(dd, i, SEND_DMA_DESC_CNT, 0);
13672                 /* SEND_DMA_DESC_FETCHED_CNT read-only */
13673                 /* SEND_DMA_ENG_ERR_STATUS read-only */
13674                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, 0);
13675                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_CLEAR, ~0ull);
13676                 /* SEND_DMA_ENG_ERR_FORCE leave alone */
13677                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_ENABLE, 0);
13678                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_VL, 0);
13679                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_JOB_KEY, 0);
13680                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_PARTITION_KEY, 0);
13681                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_SLID, 0);
13682                 write_kctxt_csr(dd, i, SEND_DMA_CHECK_OPCODE, 0);
13683                 write_kctxt_csr(dd, i, SEND_DMA_MEMORY, 0);
13684         }
13685 }
13686
13687 /*
13688  * Expect on entry:
13689  * o Packet ingress is disabled, i.e. RcvCtrl.RcvPortEnable == 0
13690  */
13691 static void init_rbufs(struct hfi1_devdata *dd)
13692 {
13693         u64 reg;
13694         int count;
13695
13696         /*
13697          * Wait for DMA to stop: RxRbufPktPending and RxPktInProgress are
13698          * clear.
13699          */
13700         count = 0;
13701         while (1) {
13702                 reg = read_csr(dd, RCV_STATUS);
13703                 if ((reg & (RCV_STATUS_RX_RBUF_PKT_PENDING_SMASK
13704                             | RCV_STATUS_RX_PKT_IN_PROGRESS_SMASK)) == 0)
13705                         break;
13706                 /*
13707                  * Give up after 1ms - maximum wait time.
13708                  *
13709                  * RBuf size is 136KiB.  Slowest possible is PCIe Gen1 x1 at
13710                  * 250MB/s bandwidth.  Lower rate to 66% for overhead to get:
13711                  *      136 KB / (66% * 250MB/s) = 844us
13712                  */
13713                 if (count++ > 500) {
13714                         dd_dev_err(dd,
13715                                    "%s: in-progress DMA not clearing: RcvStatus 0x%llx, continuing\n",
13716                                    __func__, reg);
13717                         break;
13718                 }
13719                 udelay(2); /* do not busy-wait the CSR */
13720         }
13721
13722         /* start the init - expect RcvCtrl to be 0 */
13723         write_csr(dd, RCV_CTRL, RCV_CTRL_RX_RBUF_INIT_SMASK);
13724
13725         /*
13726          * Read to force the write of RcvCtrl.RxRbufInit.  There is a brief
13727          * period after the write before RcvStatus.RxRbufInitDone is valid.
13728          * The delay in the first run through the loop below is sufficient and
13729          * required before the first read of RcvStatus.RxRbufInitDone.
13730          */
13731         read_csr(dd, RCV_CTRL);
13732
13733         /* wait for the init to finish */
13734         count = 0;
13735         while (1) {
13736                 /* delay is required first time through - see above */
13737                 udelay(2); /* do not busy-wait the CSR */
13738                 reg = read_csr(dd, RCV_STATUS);
13739                 if (reg & (RCV_STATUS_RX_RBUF_INIT_DONE_SMASK))
13740                         break;
13741
13742                 /* give up after 100us - slowest possible at 33MHz is 73us */
13743                 if (count++ > 50) {
13744                         dd_dev_err(dd,
13745                                    "%s: RcvStatus.RxRbufInit not set, continuing\n",
13746                                    __func__);
13747                         break;
13748                 }
13749         }
13750 }
13751
13752 /* set RXE CSRs to chip reset defaults */
13753 static void reset_rxe_csrs(struct hfi1_devdata *dd)
13754 {
13755         int i, j;
13756
13757         /*
13758          * RXE Kernel CSRs
13759          */
13760         write_csr(dd, RCV_CTRL, 0);
13761         init_rbufs(dd);
13762         /* RCV_STATUS read-only */
13763         /* RCV_CONTEXTS read-only */
13764         /* RCV_ARRAY_CNT read-only */
13765         /* RCV_BUF_SIZE read-only */
13766         write_csr(dd, RCV_BTH_QP, 0);
13767         write_csr(dd, RCV_MULTICAST, 0);
13768         write_csr(dd, RCV_BYPASS, 0);
13769         write_csr(dd, RCV_VL15, 0);
13770         /* this is a clear-down */
13771         write_csr(dd, RCV_ERR_INFO,
13772                   RCV_ERR_INFO_RCV_EXCESS_BUFFER_OVERRUN_SMASK);
13773         /* RCV_ERR_STATUS read-only */
13774         write_csr(dd, RCV_ERR_MASK, 0);
13775         write_csr(dd, RCV_ERR_CLEAR, ~0ull);
13776         /* RCV_ERR_FORCE leave alone */
13777         for (i = 0; i < 32; i++)
13778                 write_csr(dd, RCV_QP_MAP_TABLE + (8 * i), 0);
13779         for (i = 0; i < 4; i++)
13780                 write_csr(dd, RCV_PARTITION_KEY + (8 * i), 0);
13781         for (i = 0; i < RXE_NUM_32_BIT_COUNTERS; i++)
13782                 write_csr(dd, RCV_COUNTER_ARRAY32 + (8 * i), 0);
13783         for (i = 0; i < RXE_NUM_64_BIT_COUNTERS; i++)
13784                 write_csr(dd, RCV_COUNTER_ARRAY64 + (8 * i), 0);
13785         for (i = 0; i < RXE_NUM_RSM_INSTANCES; i++)
13786                 clear_rsm_rule(dd, i);
13787         for (i = 0; i < 32; i++)
13788                 write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), 0);
13789
13790         /*
13791          * RXE Kernel and User Per-Context CSRs
13792          */
13793         for (i = 0; i < chip_rcv_contexts(dd); i++) {
13794                 /* kernel */
13795                 write_kctxt_csr(dd, i, RCV_CTXT_CTRL, 0);
13796                 /* RCV_CTXT_STATUS read-only */
13797                 write_kctxt_csr(dd, i, RCV_EGR_CTRL, 0);
13798                 write_kctxt_csr(dd, i, RCV_TID_CTRL, 0);
13799                 write_kctxt_csr(dd, i, RCV_KEY_CTRL, 0);
13800                 write_kctxt_csr(dd, i, RCV_HDR_ADDR, 0);
13801                 write_kctxt_csr(dd, i, RCV_HDR_CNT, 0);
13802                 write_kctxt_csr(dd, i, RCV_HDR_ENT_SIZE, 0);
13803                 write_kctxt_csr(dd, i, RCV_HDR_SIZE, 0);
13804                 write_kctxt_csr(dd, i, RCV_HDR_TAIL_ADDR, 0);
13805                 write_kctxt_csr(dd, i, RCV_AVAIL_TIME_OUT, 0);
13806                 write_kctxt_csr(dd, i, RCV_HDR_OVFL_CNT, 0);
13807
13808                 /* user */
13809                 /* RCV_HDR_TAIL read-only */
13810                 write_uctxt_csr(dd, i, RCV_HDR_HEAD, 0);
13811                 /* RCV_EGR_INDEX_TAIL read-only */
13812                 write_uctxt_csr(dd, i, RCV_EGR_INDEX_HEAD, 0);
13813                 /* RCV_EGR_OFFSET_TAIL read-only */
13814                 for (j = 0; j < RXE_NUM_TID_FLOWS; j++) {
13815                         write_uctxt_csr(dd, i,
13816                                         RCV_TID_FLOW_TABLE + (8 * j), 0);
13817                 }
13818         }
13819 }
13820
13821 /*
13822  * Set sc2vl tables.
13823  *
13824  * They power on to zeros, so to avoid send context errors
13825  * they need to be set:
13826  *
13827  * SC 0-7 -> VL 0-7 (respectively)
13828  * SC 15  -> VL 15
13829  * otherwise
13830  *        -> VL 0
13831  */
13832 static void init_sc2vl_tables(struct hfi1_devdata *dd)
13833 {
13834         int i;
13835         /* init per architecture spec, constrained by hardware capability */
13836
13837         /* HFI maps sent packets */
13838         write_csr(dd, SEND_SC2VLT0, SC2VL_VAL(
13839                 0,
13840                 0, 0, 1, 1,
13841                 2, 2, 3, 3,
13842                 4, 4, 5, 5,
13843                 6, 6, 7, 7));
13844         write_csr(dd, SEND_SC2VLT1, SC2VL_VAL(
13845                 1,
13846                 8, 0, 9, 0,
13847                 10, 0, 11, 0,
13848                 12, 0, 13, 0,
13849                 14, 0, 15, 15));
13850         write_csr(dd, SEND_SC2VLT2, SC2VL_VAL(
13851                 2,
13852                 16, 0, 17, 0,
13853                 18, 0, 19, 0,
13854                 20, 0, 21, 0,
13855                 22, 0, 23, 0));
13856         write_csr(dd, SEND_SC2VLT3, SC2VL_VAL(
13857                 3,
13858                 24, 0, 25, 0,
13859                 26, 0, 27, 0,
13860                 28, 0, 29, 0,
13861                 30, 0, 31, 0));
13862
13863         /* DC maps received packets */
13864         write_csr(dd, DCC_CFG_SC_VL_TABLE_15_0, DC_SC_VL_VAL(
13865                 15_0,
13866                 0, 0, 1, 1,  2, 2,  3, 3,  4, 4,  5, 5,  6, 6,  7,  7,
13867                 8, 0, 9, 0, 10, 0, 11, 0, 12, 0, 13, 0, 14, 0, 15, 15));
13868         write_csr(dd, DCC_CFG_SC_VL_TABLE_31_16, DC_SC_VL_VAL(
13869                 31_16,
13870                 16, 0, 17, 0, 18, 0, 19, 0, 20, 0, 21, 0, 22, 0, 23, 0,
13871                 24, 0, 25, 0, 26, 0, 27, 0, 28, 0, 29, 0, 30, 0, 31, 0));
13872
13873         /* initialize the cached sc2vl values consistently with h/w */
13874         for (i = 0; i < 32; i++) {
13875                 if (i < 8 || i == 15)
13876                         *((u8 *)(dd->sc2vl) + i) = (u8)i;
13877                 else
13878                         *((u8 *)(dd->sc2vl) + i) = 0;
13879         }
13880 }
13881
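/*
 * Resulting mapping from the tables above (the send-side SC2VLT and the
 * DC receive-side tables are programmed to agree): SC0-7 -> VL0-7,
 * SC15 -> VL15, every other SC -> VL0.  The cached dd->sc2vl array is
 * filled with the same values so software lookups match the hardware.
 */
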
13882 /*
13883  * Read chip sizes and then reset parts to sane, disabled, values.  We cannot
13884  * depend on the chip going through a power-on reset - a driver may be loaded
13885  * and unloaded many times.
13886  *
13887  * Do not write any CSR values to the chip in this routine - there may be
13888  * a reset following the (possible) FLR in this routine.
13889  *
13890  */
13891 static int init_chip(struct hfi1_devdata *dd)
13892 {
13893         int i;
13894         int ret = 0;
13895
13896         /*
13897          * Put the HFI CSRs in a known state.
13898          * Combine this with a DC reset.
13899          *
13900          * Stop the device from doing anything while we do a
13901          * reset.  We know there are no other active users of
13902          * the device since we are now in charge.  Turn off
13903          * the device since we are now in charge.  Turn off
13904          * all outbound and inbound traffic and make sure
13905          */
13906
13907         /* disable send contexts and SDMA engines */
13908         write_csr(dd, SEND_CTRL, 0);
13909         for (i = 0; i < chip_send_contexts(dd); i++)
13910                 write_kctxt_csr(dd, i, SEND_CTXT_CTRL, 0);
13911         for (i = 0; i < chip_sdma_engines(dd); i++)
13912                 write_kctxt_csr(dd, i, SEND_DMA_CTRL, 0);
13913         /* disable port (turn off RXE inbound traffic) and contexts */
13914         write_csr(dd, RCV_CTRL, 0);
13915         for (i = 0; i < chip_rcv_contexts(dd); i++)
13916                 write_csr(dd, RCV_CTXT_CTRL, 0);
13917         /* mask all interrupt sources */
13918         for (i = 0; i < CCE_NUM_INT_CSRS; i++)
13919                 write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
13920
13921         /*
13922          * DC Reset: do a full DC reset before the register clear.
13923          * A recommended length of time to hold is one CSR read,
13924          * so reread the CceDcCtrl.  Then, hold the DC in reset
13925          * across the clear.
13926          */
13927         write_csr(dd, CCE_DC_CTRL, CCE_DC_CTRL_DC_RESET_SMASK);
13928         (void)read_csr(dd, CCE_DC_CTRL);
13929
13930         if (use_flr) {
13931                 /*
13932                  * A FLR will reset the SPC core and part of the PCIe.
13933                  * The parts that need to be restored have already been
13934                  * saved.
13935                  */
13936                 dd_dev_info(dd, "Resetting CSRs with FLR\n");
13937
13938                 /* do the FLR, the DC reset will remain */
13939                 pcie_flr(dd->pcidev);
13940
13941                 /* restore command and BARs */
13942                 ret = restore_pci_variables(dd);
13943                 if (ret) {
13944                         dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13945                                    __func__);
13946                         return ret;
13947                 }
13948
13949                 if (is_ax(dd)) {
13950                         dd_dev_info(dd, "Resetting CSRs with FLR\n");
13951                         pcie_flr(dd->pcidev);
13952                         ret = restore_pci_variables(dd);
13953                         if (ret) {
13954                                 dd_dev_err(dd, "%s: Could not restore PCI variables\n",
13955                                            __func__);
13956                                 return ret;
13957                         }
13958                 }
13959         } else {
13960                 dd_dev_info(dd, "Resetting CSRs with writes\n");
13961                 reset_cce_csrs(dd);
13962                 reset_txe_csrs(dd);
13963                 reset_rxe_csrs(dd);
13964                 reset_misc_csrs(dd);
13965         }
13966         /* clear the DC reset */
13967         write_csr(dd, CCE_DC_CTRL, 0);
13968
13969         /* Set the LED off */
13970         setextled(dd, 0);
13971
13972         /*
13973          * Clear the QSFP reset.
13974          * An FLR enforces a 0 on all out pins. The driver does not touch
13975          * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and
13976          * ASIC_QSFPn_OUT otherwise.  This leaves RESET_N low and holds
13977          * anything plugged in constantly in reset, if it pays attention
13978          * Prime examples of this are optical cables. Set all pins high.
13979          * I2CCLK and I2CDAT will change per direction, and INT_N and
13980          * MODPRS_N are input only and their value is ignored.
13981          */
13982         write_csr(dd, ASIC_QSFP1_OUT, 0x1f);
13983         write_csr(dd, ASIC_QSFP2_OUT, 0x1f);
13984         init_chip_resources(dd);
13985         return ret;
13986 }
13987
13988 static void init_early_variables(struct hfi1_devdata *dd)
13989 {
13990         int i;
13991
13992         /* assign link credit variables */
13993         dd->vau = CM_VAU;
13994         dd->link_credits = CM_GLOBAL_CREDITS;
13995         if (is_ax(dd))
13996                 dd->link_credits--;
13997         dd->vcu = cu_to_vcu(hfi1_cu);
13998         /* enough room for 8 MAD packets plus header - 17K */
13999         dd->vl15_init = (8 * (2048 + 128)) / vau_to_au(dd->vau);
14000         if (dd->vl15_init > dd->link_credits)
14001                 dd->vl15_init = dd->link_credits;
14002
14003         write_uninitialized_csrs_and_memories(dd);
14004
14005         if (HFI1_CAP_IS_KSET(PKEY_CHECK))
14006                 for (i = 0; i < dd->num_pports; i++) {
14007                         struct hfi1_pportdata *ppd = &dd->pport[i];
14008
14009                         set_partition_keys(ppd);
14010                 }
14011         init_sc2vl_tables(dd);
14012 }
14013
14014 static void init_kdeth_qp(struct hfi1_devdata *dd)
14015 {
14016         /* user changed the KDETH_QP */
14017         if (kdeth_qp != 0 && kdeth_qp >= 0xff) {
14018                 /* out of range or illegal value */
14019                 dd_dev_err(dd, "Invalid KDETH queue pair prefix, ignoring");
14020                 kdeth_qp = 0;
14021         }
14022         if (kdeth_qp == 0)      /* not set, or failed range check */
14023                 kdeth_qp = DEFAULT_KDETH_QP;
14024
14025         write_csr(dd, SEND_BTH_QP,
14026                   (kdeth_qp & SEND_BTH_QP_KDETH_QP_MASK) <<
14027                   SEND_BTH_QP_KDETH_QP_SHIFT);
14028
14029         write_csr(dd, RCV_BTH_QP,
14030                   (kdeth_qp & RCV_BTH_QP_KDETH_QP_MASK) <<
14031                   RCV_BTH_QP_KDETH_QP_SHIFT);
14032 }
14033
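/*
 * Illustrative note on the writes above: the prefix is an 8-bit value
 * (masked by SEND_BTH_QP_KDETH_QP_MASK / RCV_BTH_QP_KDETH_QP_MASK) placed
 * at the corresponding _SHIFT in SEND_BTH_QP and RCV_BTH_QP.  With the
 * default prefix (DEFAULT_KDETH_QP), BTH QPNs whose high-order byte
 * matches the prefix are classified as KDETH traffic; everything else is
 * handled as normal verbs traffic.
 */
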
14034 /**
14035  * hfi1_get_qp_map - read an entry from the RcvQPMapTable
14036  * @dd: device data
14037  * @idx: index to read
14038  */
14039 u8 hfi1_get_qp_map(struct hfi1_devdata *dd, u8 idx)
14040 {
14041         u64 reg = read_csr(dd, RCV_QP_MAP_TABLE + (idx / 8) * 8);
14042
14043         reg >>= (idx % 8) * 8;
14044         return reg;
14045 }
14046
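/*
 * Example of the arithmetic in hfi1_get_qp_map() above: idx = 10 reads the
 * second RcvQPMapTable CSR (10 / 8 = 1, so base + 8), shifts right by
 * 16 bits (10 % 8 = 2 bytes) and returns the low byte, i.e. map entry 10.
 */
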
14047 /**
14048  * init_qpmap_table
14049  * @dd - device data
14050  * @first_ctxt - first context
14051  * @last_ctxt - last context
14052  *
14053  * This routine sets the qpn mapping table that
14054  * is indexed by qpn[8:1].
14055  *
14056  * The routine will round robin the 256 settings
14057  * from first_ctxt to last_ctxt.
14058  *
14059  * The first/last looks ahead to having specialized
14060  * receive contexts for mgmt and bypass.  Normal
14061  * verbs traffic will be assumed to be on a range
14062  * of receive contexts.
14063  */
14064 static void init_qpmap_table(struct hfi1_devdata *dd,
14065                              u32 first_ctxt,
14066                              u32 last_ctxt)
14067 {
14068         u64 reg = 0;
14069         u64 regno = RCV_QP_MAP_TABLE;
14070         int i;
14071         u64 ctxt = first_ctxt;
14072
14073         for (i = 0; i < 256; i++) {
14074                 reg |= ctxt << (8 * (i % 8));
14075                 ctxt++;
14076                 if (ctxt > last_ctxt)
14077                         ctxt = first_ctxt;
14078                 if (i % 8 == 7) {
14079                         write_csr(dd, regno, reg);
14080                         reg = 0;
14081                         regno += 8;
14082                 }
14083         }
14084
14085         add_rcvctrl(dd, RCV_CTRL_RCV_QP_MAP_ENABLE_SMASK
14086                         | RCV_CTRL_RCV_BYPASS_ENABLE_SMASK);
14087 }
14088
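/*
 * Packing sketch for init_qpmap_table() above: the 256 one-byte entries
 * are written eight at a time into 32 consecutive RcvQPMapTable CSRs.
 * For example (illustrative values), first_ctxt = 1 and last_ctxt = 3
 * yields the repeating byte pattern 1, 2, 3, 1, 2, 3, ... so map entry i
 * (indexed by qpn[8:1]) selects context 1 + (i % 3).
 */
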
14089 struct rsm_map_table {
14090         u64 map[NUM_MAP_REGS];
14091         unsigned int used;
14092 };
14093
14094 struct rsm_rule_data {
14095         u8 offset;
14096         u8 pkt_type;
14097         u32 field1_off;
14098         u32 field2_off;
14099         u32 index1_off;
14100         u32 index1_width;
14101         u32 index2_off;
14102         u32 index2_width;
14103         u32 mask1;
14104         u32 value1;
14105         u32 mask2;
14106         u32 value2;
14107 };
14108
14109 /*
14110  * Return an initialized RMT map table for users to fill in.  OK if it
14111  * returns NULL, indicating no table.
14112  */
14113 static struct rsm_map_table *alloc_rsm_map_table(struct hfi1_devdata *dd)
14114 {
14115         struct rsm_map_table *rmt;
14116         u8 rxcontext = is_ax(dd) ? 0 : 0xff;  /* 0 is default if a0 ver. */
14117
14118         rmt = kmalloc(sizeof(*rmt), GFP_KERNEL);
14119         if (rmt) {
14120                 memset(rmt->map, rxcontext, sizeof(rmt->map));
14121                 rmt->used = 0;
14122         }
14123
14124         return rmt;
14125 }
14126
14127 /*
14128  * Write the final RMT map table to the chip and free the table.  OK if
14129  * table is NULL.
14130  */
14131 static void complete_rsm_map_table(struct hfi1_devdata *dd,
14132                                    struct rsm_map_table *rmt)
14133 {
14134         int i;
14135
14136         if (rmt) {
14137                 /* write table to chip */
14138                 for (i = 0; i < NUM_MAP_REGS; i++)
14139                         write_csr(dd, RCV_RSM_MAP_TABLE + (8 * i), rmt->map[i]);
14140
14141                 /* enable RSM */
14142                 add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14143         }
14144 }
14145
14146 /*
14147  * Add a receive side mapping rule.
14148  */
14149 static void add_rsm_rule(struct hfi1_devdata *dd, u8 rule_index,
14150                          struct rsm_rule_data *rrd)
14151 {
14152         write_csr(dd, RCV_RSM_CFG + (8 * rule_index),
14153                   (u64)rrd->offset << RCV_RSM_CFG_OFFSET_SHIFT |
14154                   1ull << rule_index | /* enable bit */
14155                   (u64)rrd->pkt_type << RCV_RSM_CFG_PACKET_TYPE_SHIFT);
14156         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index),
14157                   (u64)rrd->field1_off << RCV_RSM_SELECT_FIELD1_OFFSET_SHIFT |
14158                   (u64)rrd->field2_off << RCV_RSM_SELECT_FIELD2_OFFSET_SHIFT |
14159                   (u64)rrd->index1_off << RCV_RSM_SELECT_INDEX1_OFFSET_SHIFT |
14160                   (u64)rrd->index1_width << RCV_RSM_SELECT_INDEX1_WIDTH_SHIFT |
14161                   (u64)rrd->index2_off << RCV_RSM_SELECT_INDEX2_OFFSET_SHIFT |
14162                   (u64)rrd->index2_width << RCV_RSM_SELECT_INDEX2_WIDTH_SHIFT);
14163         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index),
14164                   (u64)rrd->mask1 << RCV_RSM_MATCH_MASK1_SHIFT |
14165                   (u64)rrd->value1 << RCV_RSM_MATCH_VALUE1_SHIFT |
14166                   (u64)rrd->mask2 << RCV_RSM_MATCH_MASK2_SHIFT |
14167                   (u64)rrd->value2 << RCV_RSM_MATCH_VALUE2_SHIFT);
14168 }
14169
14170 /*
14171  * Clear a receive side mapping rule.
14172  */
14173 static void clear_rsm_rule(struct hfi1_devdata *dd, u8 rule_index)
14174 {
14175         write_csr(dd, RCV_RSM_CFG + (8 * rule_index), 0);
14176         write_csr(dd, RCV_RSM_SELECT + (8 * rule_index), 0);
14177         write_csr(dd, RCV_RSM_MATCH + (8 * rule_index), 0);
14178 }
14179
14180 /* return the number of RSM map table entries that will be used for QOS */
14181 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
14182                            unsigned int *np)
14183 {
14184         int i;
14185         unsigned int m, n;
14186         u8 max_by_vl = 0;
14187
14188         /* is QOS active at all? */
14189         if (dd->n_krcv_queues <= MIN_KERNEL_KCTXTS ||
14190             num_vls == 1 ||
14191             krcvqsset <= 1)
14192                 goto no_qos;
14193
14194         /* determine bits for qpn */
14195         for (i = 0; i < min_t(unsigned int, num_vls, krcvqsset); i++)
14196                 if (krcvqs[i] > max_by_vl)
14197                         max_by_vl = krcvqs[i];
14198         if (max_by_vl > 32)
14199                 goto no_qos;
14200         m = ilog2(__roundup_pow_of_two(max_by_vl));
14201
14202         /* determine bits for vl */
14203         n = ilog2(__roundup_pow_of_two(num_vls));
14204
14205         /* reject if too much is used */
14206         if ((m + n) > 7)
14207                 goto no_qos;
14208
14209         if (mp)
14210                 *mp = m;
14211         if (np)
14212                 *np = n;
14213
14214         return 1 << (m + n);
14215
14216 no_qos:
14217         if (mp)
14218                 *mp = 0;
14219         if (np)
14220                 *np = 0;
14221         return 0;
14222 }
14223
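/*
 * Worked example for qos_rmt_entries() (illustrative values, assuming QOS
 * is otherwise enabled): num_vls = 8 and krcvqs[] = { 2, 2, 2, 2, 2, 2, 2, 2 }
 * gives max_by_vl = 2, so m = ilog2(2) = 1 qpn bit and n = ilog2(8) = 3 vl
 * bits.  Since m + n = 4 <= 7, the routine returns 1 << 4 = 16 map entries.
 */
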
14224 /**
14225  * init_qos - init RX qos
14226  * @dd - device data
14227  * @rmt - RSM map table
14228  *
14229  * This routine initializes Rule 0 and the RSM map table to implement
14230  * quality of service (qos).
14231  *
14232  * If all of the limit tests succeed, qos is applied based on the array
14233  * interpretation of krcvqs where entry 0 is VL0.
14234  *
14235  * The number of vl bits (n) and the number of qpn bits (m) are computed to
14236  * feed both the RSM map table and the single rule.
14237  */
14238 static void init_qos(struct hfi1_devdata *dd, struct rsm_map_table *rmt)
14239 {
14240         struct rsm_rule_data rrd;
14241         unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m;
14242         unsigned int rmt_entries;
14243         u64 reg;
14244
14245         if (!rmt)
14246                 goto bail;
14247         rmt_entries = qos_rmt_entries(dd, &m, &n);
14248         if (rmt_entries == 0)
14249                 goto bail;
14250         qpns_per_vl = 1 << m;
14251
14252         /* enough room in the map table? */
14253         rmt_entries = 1 << (m + n);
14254         if (rmt->used + rmt_entries >= NUM_MAP_ENTRIES)
14255                 goto bail;
14256
14257         /* add qos entries to the RSM map table */
14258         for (i = 0, ctxt = FIRST_KERNEL_KCTXT; i < num_vls; i++) {
14259                 unsigned tctxt;
14260
14261                 for (qpn = 0, tctxt = ctxt;
14262                      krcvqs[i] && qpn < qpns_per_vl; qpn++) {
14263                         unsigned idx, regoff, regidx;
14264
14265                         /* generate the index the hardware will produce */
14266                         idx = rmt->used + ((qpn << n) ^ i);
14267                         regoff = (idx % 8) * 8;
14268                         regidx = idx / 8;
14269                         /* replace default with context number */
14270                         reg = rmt->map[regidx];
14271                         reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK
14272                                 << regoff);
14273                         reg |= (u64)(tctxt++) << regoff;
14274                         rmt->map[regidx] = reg;
14275                         if (tctxt == ctxt + krcvqs[i])
14276                                 tctxt = ctxt;
14277                 }
14278                 ctxt += krcvqs[i];
14279         }
14280
14281         rrd.offset = rmt->used;
14282         rrd.pkt_type = 2;
14283         rrd.field1_off = LRH_BTH_MATCH_OFFSET;
14284         rrd.field2_off = LRH_SC_MATCH_OFFSET;
14285         rrd.index1_off = LRH_SC_SELECT_OFFSET;
14286         rrd.index1_width = n;
14287         rrd.index2_off = QPN_SELECT_OFFSET;
14288         rrd.index2_width = m + n;
14289         rrd.mask1 = LRH_BTH_MASK;
14290         rrd.value1 = LRH_BTH_VALUE;
14291         rrd.mask2 = LRH_SC_MASK;
14292         rrd.value2 = LRH_SC_VALUE;
14293
14294         /* add rule 0 */
14295         add_rsm_rule(dd, RSM_INS_VERBS, &rrd);
14296
14297         /* mark RSM map entries as used */
14298         rmt->used += rmt_entries;
14299         /* map everything else to the mcast/err/vl15 context */
14300         init_qpmap_table(dd, HFI1_CTRL_CTXT, HFI1_CTRL_CTXT);
14301         dd->qos_shift = n + 1;
14302         return;
14303 bail:
14304         dd->qos_shift = 1;
14305         init_qpmap_table(dd, FIRST_KERNEL_KCTXT, dd->n_krcv_queues - 1);
14306 }
14307
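/*
 * Map layout sketch for init_qos() above, using illustrative values of
 * m = 1 qpn bit and n = 3 vl bits: VL i owns map entries
 * rmt->used + ((0 << n) ^ i) and rmt->used + ((1 << n) ^ i), i.e.
 * used + i and used + 8 + i, each filled round-robin from that VL's
 * krcvqs[i] kernel contexts; dd->qos_shift becomes n + 1 = 4.
 */
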
14308 static void init_fecn_handling(struct hfi1_devdata *dd,
14309                                struct rsm_map_table *rmt)
14310 {
14311         struct rsm_rule_data rrd;
14312         u64 reg;
14313         int i, idx, regoff, regidx, start;
14314         u8 offset;
14315         u32 total_cnt;
14316
14317         if (HFI1_CAP_IS_KSET(TID_RDMA))
14318                 /* Exclude context 0 */
14319                 start = 1;
14320         else
14321                 start = dd->first_dyn_alloc_ctxt;
14322
14323         total_cnt = dd->num_rcv_contexts - start;
14324
14325         /* there needs to be enough room in the map table */
14326         if (rmt->used + total_cnt >= NUM_MAP_ENTRIES) {
14327                 dd_dev_err(dd, "FECN handling disabled - too many contexts allocated\n");
14328                 return;
14329         }
14330
14331         /*
14332          * RSM will extract the destination context as an index into the
14333          * map table.  The destination contexts are a sequential block
14334          * in the range start...num_rcv_contexts-1 (inclusive).
14335          * Map entries are accessed as offset + extracted value.  Adjust
14336          * the added offset so this sequence can be placed anywhere in
14337          * the table - as long as the entries themselves do not wrap.
14338          * There are only enough bits in offset for the table size, so
14339          * start with that to allow for a "negative" offset.
14340          */
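        /*
         * Worked example (numbers assumed): with NUM_MAP_ENTRIES == 256,
         * rmt->used == 10 and start == 3, offset becomes (256 + 10 - 3)
         * truncated to 8 bits == 7, so extracted context 3 selects map
         * entry 7 + 3 == 10, the first entry of this block.
         */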
14341         offset = (u8)(NUM_MAP_ENTRIES + rmt->used - start);
14342
14343         for (i = start, idx = rmt->used; i < dd->num_rcv_contexts;
14344              i++, idx++) {
14345                 /* replace with identity mapping */
14346                 regoff = (idx % 8) * 8;
14347                 regidx = idx / 8;
14348                 reg = rmt->map[regidx];
14349                 reg &= ~(RCV_RSM_MAP_TABLE_RCV_CONTEXT_A_MASK << regoff);
14350                 reg |= (u64)i << regoff;
14351                 rmt->map[regidx] = reg;
14352         }
14353
14354         /*
14355          * For RSM intercept of Expected FECN packets:
14356          * o packet type 0 - expected
14357          * o match on F (bit 95), using select/match 1, and
14358          * o match on SH (bit 133), using select/match 2.
14359          *
14360          * Use index 1 to extract the 8-bit receive context from DestQP
14361          * (start at bit 64).  Use that as the RSM map table index.
14362          */
14363         rrd.offset = offset;
14364         rrd.pkt_type = 0;
14365         rrd.field1_off = 95;
14366         rrd.field2_off = 133;
14367         rrd.index1_off = 64;
14368         rrd.index1_width = 8;
14369         rrd.index2_off = 0;
14370         rrd.index2_width = 0;
14371         rrd.mask1 = 1;
14372         rrd.value1 = 1;
14373         rrd.mask2 = 1;
14374         rrd.value2 = 1;
14375
14376         /* add rule 1 */
14377         add_rsm_rule(dd, RSM_INS_FECN, &rrd);
14378
14379         rmt->used += total_cnt;
14380 }
14381
14382 /* Initialize RSM for VNIC */
14383 void hfi1_init_vnic_rsm(struct hfi1_devdata *dd)
14384 {
14385         u8 i, j;
14386         u8 ctx_id = 0;
14387         u64 reg;
14388         u32 regoff;
14389         struct rsm_rule_data rrd;
14390
14391         if (hfi1_vnic_is_rsm_full(dd, NUM_VNIC_MAP_ENTRIES)) {
14392                 dd_dev_err(dd, "Vnic RSM disabled, rmt entries used = %d\n",
14393                            dd->vnic.rmt_start);
14394                 return;
14395         }
14396
14397         dev_dbg(&(dd)->pcidev->dev, "Vnic rsm start = %d, end %d\n",
14398                 dd->vnic.rmt_start,
14399                 dd->vnic.rmt_start + NUM_VNIC_MAP_ENTRIES);
14400
14401         /* Update RSM mapping table, 32 regs, 256 entries - 1 ctx per byte */
14402         regoff = RCV_RSM_MAP_TABLE + (dd->vnic.rmt_start / 8) * 8;
14403         reg = read_csr(dd, regoff);
14404         for (i = 0; i < NUM_VNIC_MAP_ENTRIES; i++) {
14405                 /* Update map register with vnic context */
14406                 j = (dd->vnic.rmt_start + i) % 8;
14407                 reg &= ~(0xffllu << (j * 8));
14408                 reg |= (u64)dd->vnic.ctxt[ctx_id++]->ctxt << (j * 8);
14409                 /* Wrap up vnic ctx index */
14410                 ctx_id %= dd->vnic.num_ctxt;
14411                 /* Write back map register */
14412                 if (j == 7 || ((i + 1) == NUM_VNIC_MAP_ENTRIES)) {
14413                         dev_dbg(&(dd)->pcidev->dev,
14414                                 "Vnic rsm map reg[%d] =0x%llx\n",
14415                                 regoff - RCV_RSM_MAP_TABLE, reg);
14416
14417                         write_csr(dd, regoff, reg);
14418                         regoff += 8;
14419                         if (i < (NUM_VNIC_MAP_ENTRIES - 1))
14420                                 reg = read_csr(dd, regoff);
14421                 }
14422         }
14423
14424         /* Add rule for vnic */
14425         rrd.offset = dd->vnic.rmt_start;
14426         rrd.pkt_type = 4;
14427         /* Match 16B packets */
14428         rrd.field1_off = L2_TYPE_MATCH_OFFSET;
14429         rrd.mask1 = L2_TYPE_MASK;
14430         rrd.value1 = L2_16B_VALUE;
14431         /* Match ETH L4 packets */
14432         rrd.field2_off = L4_TYPE_MATCH_OFFSET;
14433         rrd.mask2 = L4_16B_TYPE_MASK;
14434         rrd.value2 = L4_16B_ETH_VALUE;
14435         /* Calc context from veswid and entropy */
14436         rrd.index1_off = L4_16B_HDR_VESWID_OFFSET;
14437         rrd.index1_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14438         rrd.index2_off = L2_16B_ENTROPY_OFFSET;
14439         rrd.index2_width = ilog2(NUM_VNIC_MAP_ENTRIES);
14440         add_rsm_rule(dd, RSM_INS_VNIC, &rrd);
14441
14442         /* Enable RSM if not already enabled */
14443         add_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14444 }
14445
14446 void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
14447 {
14448         clear_rsm_rule(dd, RSM_INS_VNIC);
14449
14450         /* Disable RSM if used only by vnic */
14451         if (dd->vnic.rmt_start == 0)
14452                 clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
14453 }
14454
14455 static int init_rxe(struct hfi1_devdata *dd)
14456 {
14457         struct rsm_map_table *rmt;
14458         u64 val;
14459
14460         /* enable all receive errors */
14461         write_csr(dd, RCV_ERR_MASK, ~0ull);
14462
14463         rmt = alloc_rsm_map_table(dd);
14464         if (!rmt)
14465                 return -ENOMEM;
14466
14467         /* set up QOS, including the QPN map table */
14468         init_qos(dd, rmt);
14469         init_fecn_handling(dd, rmt);
14470         complete_rsm_map_table(dd, rmt);
14471         /* record number of used rsm map entries for vnic */
14472         dd->vnic.rmt_start = rmt->used;
14473         kfree(rmt);
14474
14475         /*
14476          * make sure RcvCtrl.RcvWcb <= PCIe Device Control
14477          * Register Max_Payload_Size (PCI_EXP_DEVCTL in Linux PCIe config
14478          * space, PciCfgCap2.MaxPayloadSize in HFI).  There is only one
14479          * invalid configuration: RcvCtrl.RcvWcb set to its max of 256 and
14480          * Max_PayLoad_Size set to its minimum of 128.
14481          * Max_Payload_Size set to its minimum of 128.
14482          * Presently, RcvCtrl.RcvWcb is not modified from its default of 0
14483          * (64 bytes).  Max_Payload_Size is possibly modified upward in
14484          * tune_pcie_caps() which is called after this routine.
14485          */
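        /*
         * Concretely: the default RcvWcb of 0 (64 bytes) is always <= the
         * 128-byte minimum Max_Payload_Size, so nothing needs to be
         * adjusted here.
         */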
14486
14487         /* Have 16 bytes (4DW) of bypass header available in header queue */
14488         val = read_csr(dd, RCV_BYPASS);
14489         val &= ~RCV_BYPASS_HDR_SIZE_SMASK;
14490         val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
14491                 RCV_BYPASS_HDR_SIZE_SHIFT);
14492         write_csr(dd, RCV_BYPASS, val);
14493         return 0;
14494 }
14495
14496 static void init_other(struct hfi1_devdata *dd)
14497 {
14498         /* enable all CCE errors */
14499         write_csr(dd, CCE_ERR_MASK, ~0ull);
14500         /* enable *some* Misc errors */
14501         write_csr(dd, MISC_ERR_MASK, DRIVER_MISC_MASK);
14502         /* enable all DC errors, except LCB */
14503         write_csr(dd, DCC_ERR_FLG_EN, ~0ull);
14504         write_csr(dd, DC_DC8051_ERR_EN, ~0ull);
14505 }
14506
14507 /*
14508  * Fill out the given AU table using the given CU.  A CU is defined in terms
14509  * of AUs.  The table is an encoding: given the index, how many AUs does that
14510  * represent?
14511  *
14512  * NOTE: Assumes that the register layout is the same for the
14513  * local and remote tables.
14514  */
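/*
 * Illustration of the encoding written below: entry 0 holds 0 AUs, entry 1
 * holds 1 AU, and entries 2..7 hold 2*cu, 4*cu, 8*cu, 16*cu, 32*cu and
 * 64*cu AUs, each doubling the previous entry.
 */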
14515 static void assign_cm_au_table(struct hfi1_devdata *dd, u32 cu,
14516                                u32 csr0to3, u32 csr4to7)
14517 {
14518         write_csr(dd, csr0to3,
14519                   0ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE0_SHIFT |
14520                   1ull << SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE1_SHIFT |
14521                   2ull * cu <<
14522                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE2_SHIFT |
14523                   4ull * cu <<
14524                   SEND_CM_LOCAL_AU_TABLE0_TO3_LOCAL_AU_TABLE3_SHIFT);
14525         write_csr(dd, csr4to7,
14526                   8ull * cu <<
14527                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE4_SHIFT |
14528                   16ull * cu <<
14529                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE5_SHIFT |
14530                   32ull * cu <<
14531                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE6_SHIFT |
14532                   64ull * cu <<
14533                   SEND_CM_LOCAL_AU_TABLE4_TO7_LOCAL_AU_TABLE7_SHIFT);
14534 }
14535
14536 static void assign_local_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14537 {
14538         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_LOCAL_AU_TABLE0_TO3,
14539                            SEND_CM_LOCAL_AU_TABLE4_TO7);
14540 }
14541
14542 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu)
14543 {
14544         assign_cm_au_table(dd, vcu_to_cu(vcu), SEND_CM_REMOTE_AU_TABLE0_TO3,
14545                            SEND_CM_REMOTE_AU_TABLE4_TO7);
14546 }
14547
14548 static void init_txe(struct hfi1_devdata *dd)
14549 {
14550         int i;
14551
14552         /* enable all PIO, SDMA, general, and Egress errors */
14553         write_csr(dd, SEND_PIO_ERR_MASK, ~0ull);
14554         write_csr(dd, SEND_DMA_ERR_MASK, ~0ull);
14555         write_csr(dd, SEND_ERR_MASK, ~0ull);
14556         write_csr(dd, SEND_EGRESS_ERR_MASK, ~0ull);
14557
14558         /* enable all per-context and per-SDMA engine errors */
14559         for (i = 0; i < chip_send_contexts(dd); i++)
14560                 write_kctxt_csr(dd, i, SEND_CTXT_ERR_MASK, ~0ull);
14561         for (i = 0; i < chip_sdma_engines(dd); i++)
14562                 write_kctxt_csr(dd, i, SEND_DMA_ENG_ERR_MASK, ~0ull);
14563
14564         /* set the local CU to AU mapping */
14565         assign_local_cm_au_table(dd, dd->vcu);
14566
14567         /*
14568          * Set reasonable default for Credit Return Timer
14569          * Don't set on Simulator - causes it to choke.
14570          */
14571         if (dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
14572                 write_csr(dd, SEND_CM_TIMER_CTRL, HFI1_CREDIT_RETURN_RATE);
14573 }
14574
14575 int hfi1_set_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14576                        u16 jkey)
14577 {
14578         u8 hw_ctxt;
14579         u64 reg;
14580
14581         if (!rcd || !rcd->sc)
14582                 return -EINVAL;
14583
14584         hw_ctxt = rcd->sc->hw_context;
14585         reg = SEND_CTXT_CHECK_JOB_KEY_MASK_SMASK | /* mask is always 1's */
14586                 ((jkey & SEND_CTXT_CHECK_JOB_KEY_VALUE_MASK) <<
14587                  SEND_CTXT_CHECK_JOB_KEY_VALUE_SHIFT);
14588         /* JOB_KEY_ALLOW_PERMISSIVE is not allowed by default */
14589         if (HFI1_CAP_KGET_MASK(rcd->flags, ALLOW_PERM_JKEY))
14590                 reg |= SEND_CTXT_CHECK_JOB_KEY_ALLOW_PERMISSIVE_SMASK;
14591         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, reg);
14592         /*
14593          * Enable send-side J_KEY integrity check, unless this is A0 h/w
14594          */
14595         if (!is_ax(dd)) {
14596                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14597                 reg |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14598                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14599         }
14600
14601         /* Enable J_KEY check on receive context. */
14602         reg = RCV_KEY_CTRL_JOB_KEY_ENABLE_SMASK |
14603                 ((jkey & RCV_KEY_CTRL_JOB_KEY_VALUE_MASK) <<
14604                  RCV_KEY_CTRL_JOB_KEY_VALUE_SHIFT);
14605         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, reg);
14606
14607         return 0;
14608 }
14609
14610 int hfi1_clear_ctxt_jkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
14611 {
14612         u8 hw_ctxt;
14613         u64 reg;
14614
14615         if (!rcd || !rcd->sc)
14616                 return -EINVAL;
14617
14618         hw_ctxt = rcd->sc->hw_context;
14619         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_JOB_KEY, 0);
14620         /*
14621          * Disable send-side J_KEY integrity check, unless this is A0 h/w.
14622          * This check would not have been enabled for A0 h/w, see
14623          * set_ctxt_jkey().
14624          */
14625         if (!is_ax(dd)) {
14626                 reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14627                 reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK;
14628                 write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14629         }
14630         /* Turn off the J_KEY on the receive side */
14631         write_kctxt_csr(dd, rcd->ctxt, RCV_KEY_CTRL, 0);
14632
14633         return 0;
14634 }
14635
14636 int hfi1_set_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd,
14637                        u16 pkey)
14638 {
14639         u8 hw_ctxt;
14640         u64 reg;
14641
14642         if (!rcd || !rcd->sc)
14643                 return -EINVAL;
14644
14645         hw_ctxt = rcd->sc->hw_context;
14646         reg = ((u64)pkey & SEND_CTXT_CHECK_PARTITION_KEY_VALUE_MASK) <<
14647                 SEND_CTXT_CHECK_PARTITION_KEY_VALUE_SHIFT;
14648         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, reg);
14649         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14650         reg |= SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14651         reg &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_KDETH_PACKETS_SMASK;
14652         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14653
14654         return 0;
14655 }
14656
14657 int hfi1_clear_ctxt_pkey(struct hfi1_devdata *dd, struct hfi1_ctxtdata *ctxt)
14658 {
14659         u8 hw_ctxt;
14660         u64 reg;
14661
14662         if (!ctxt || !ctxt->sc)
14663                 return -EINVAL;
14664
14665         hw_ctxt = ctxt->sc->hw_context;
14666         reg = read_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE);
14667         reg &= ~SEND_CTXT_CHECK_ENABLE_CHECK_PARTITION_KEY_SMASK;
14668         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_ENABLE, reg);
14669         write_kctxt_csr(dd, hw_ctxt, SEND_CTXT_CHECK_PARTITION_KEY, 0);
14670
14671         return 0;
14672 }
14673
14674 /*
14675  * Start cleaning up the chip.  Our cleanup happens in multiple
14676  * stages and this is just the first.
14677  */
14678 void hfi1_start_cleanup(struct hfi1_devdata *dd)
14679 {
14680         aspm_exit(dd);
14681         free_cntrs(dd);
14682         free_rcverr(dd);
14683         finish_chip_resources(dd);
14684 }
14685
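/*
 * The base GUID masks off the per-HFI index bit so that both HFIs on the
 * same ASIC report the same value; init_asic_data() below relies on this
 * to find the peer device.
 */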
14686 #define HFI_BASE_GUID(dev) \
14687         ((dev)->base_guid & ~(1ULL << GUID_HFI_INDEX_SHIFT))
14688
14689 /*
14690  * Information can be shared between the two HFIs on the same ASIC
14691  * in the same OS.  This function finds the peer device and sets
14692  * up a shared structure.
14693  */
14694 static int init_asic_data(struct hfi1_devdata *dd)
14695 {
14696         unsigned long index;
14697         struct hfi1_devdata *peer;
14698         struct hfi1_asic_data *asic_data;
14699         int ret = 0;
14700
14701         /* pre-allocate the asic structure in case we are the first device */
14702         asic_data = kzalloc(sizeof(*dd->asic_data), GFP_KERNEL);
14703         if (!asic_data)
14704                 return -ENOMEM;
14705
14706         xa_lock_irq(&hfi1_dev_table);
14707         /* Find our peer device */
14708         xa_for_each(&hfi1_dev_table, index, peer) {
14709                 if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
14710                     dd->unit != peer->unit)
14711                         break;
14712         }
14713
14714         if (peer) {
14715                 /* use already allocated structure */
14716                 dd->asic_data = peer->asic_data;
14717                 kfree(asic_data);
14718         } else {
14719                 dd->asic_data = asic_data;
14720                 mutex_init(&dd->asic_data->asic_resource_mutex);
14721         }
14722         dd->asic_data->dds[dd->hfi1_id] = dd; /* self back-pointer */
14723         xa_unlock_irq(&hfi1_dev_table);
14724
14725         /* first one through - set up i2c devices */
14726         if (!peer)
14727                 ret = set_up_i2c(dd, dd->asic_data);
14728
14729         return ret;
14730 }
14731
14732 /*
14733  * Set dd->boardname.  Use a generic name if a name is not returned from
14734  * EFI variable space.
14735  *
14736  * Return 0 on success, -ENOMEM if space could not be allocated.
14737  */
14738 static int obtain_boardname(struct hfi1_devdata *dd)
14739 {
14740         /* generic board description */
14741         const char generic[] =
14742                 "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
14743         unsigned long size;
14744         int ret;
14745
14746         ret = read_hfi1_efi_var(dd, "description", &size,
14747                                 (void **)&dd->boardname);
14748         if (ret) {
14749                 dd_dev_info(dd, "Board description not found\n");
14750                 /* use generic description */
14751                 dd->boardname = kstrdup(generic, GFP_KERNEL);
14752                 if (!dd->boardname)
14753                         return -ENOMEM;
14754         }
14755         return 0;
14756 }
14757
14758 /*
14759  * Check the interrupt registers to make sure that they are mapped correctly.
14760  * It is intended to help the user identify any mismapping by the VMM when
14761  * the driver is running in a VM. This function should only be called before
14762  * interrupts are set up properly.
14763  *
14764  * Return 0 on success, -EINVAL on failure.
14765  */
14766 static int check_int_registers(struct hfi1_devdata *dd)
14767 {
14768         u64 reg;
14769         u64 all_bits = ~(u64)0;
14770         u64 mask;
14771
14772         /* Clear CceIntMask[0] to avoid raising any interrupts */
14773         mask = read_csr(dd, CCE_INT_MASK);
14774         write_csr(dd, CCE_INT_MASK, 0ull);
14775         reg = read_csr(dd, CCE_INT_MASK);
14776         if (reg)
14777                 goto err_exit;
14778
14779         /* Clear all interrupt status bits */
14780         write_csr(dd, CCE_INT_CLEAR, all_bits);
14781         reg = read_csr(dd, CCE_INT_STATUS);
14782         if (reg)
14783                 goto err_exit;
14784
14785         /* Set all interrupt status bits */
14786         write_csr(dd, CCE_INT_FORCE, all_bits);
14787         reg = read_csr(dd, CCE_INT_STATUS);
14788         if (reg != all_bits)
14789                 goto err_exit;
14790
14791         /* Restore the interrupt mask */
14792         write_csr(dd, CCE_INT_CLEAR, all_bits);
14793         write_csr(dd, CCE_INT_MASK, mask);
14794
14795         return 0;
14796 err_exit:
14797         write_csr(dd, CCE_INT_MASK, mask);
14798         dd_dev_err(dd, "Interrupt registers not properly mapped by VMM\n");
14799         return -EINVAL;
14800 }
14801
14802 /**
14803  * hfi1_init_dd() - Initialize most of the dd structure.
14804  * @dd: the hfi1 device data structure
14805  *
14807  * This is global, and is called directly at init to set up the
14808  * chip-specific function pointers for later use.
14809  */
14810 int hfi1_init_dd(struct hfi1_devdata *dd)
14811 {
14812         struct pci_dev *pdev = dd->pcidev;
14813         struct hfi1_pportdata *ppd;
14814         u64 reg;
14815         int i, ret;
14816         static const char * const inames[] = { /* implementation names */
14817                 "RTL silicon",
14818                 "RTL VCS simulation",
14819                 "RTL FPGA emulation",
14820                 "Functional simulator"
14821         };
14822         struct pci_dev *parent = pdev->bus->self;
14823         u32 sdma_engines = chip_sdma_engines(dd);
14824
14825         ppd = dd->pport;
14826         for (i = 0; i < dd->num_pports; i++, ppd++) {
14827                 int vl;
14828                 /* init common fields */
14829                 hfi1_init_pportdata(pdev, ppd, dd, 0, 1);
14830                 /* DC supports 4 link widths */
14831                 ppd->link_width_supported =
14832                         OPA_LINK_WIDTH_1X | OPA_LINK_WIDTH_2X |
14833                         OPA_LINK_WIDTH_3X | OPA_LINK_WIDTH_4X;
14834                 ppd->link_width_downgrade_supported =
14835                         ppd->link_width_supported;
14836                 /* start out enabling only 4X */
14837                 ppd->link_width_enabled = OPA_LINK_WIDTH_4X;
14838                 ppd->link_width_downgrade_enabled =
14839                                         ppd->link_width_downgrade_supported;
14840                 /* link width active is 0 when link is down */
14841                 /* link width downgrade active is 0 when link is down */
14842
14843                 if (num_vls < HFI1_MIN_VLS_SUPPORTED ||
14844                     num_vls > HFI1_MAX_VLS_SUPPORTED) {
14845                         dd_dev_err(dd, "Invalid num_vls %u, using %u VLs\n",
14846                                    num_vls, HFI1_MAX_VLS_SUPPORTED);
14847                         num_vls = HFI1_MAX_VLS_SUPPORTED;
14848                 }
14849                 ppd->vls_supported = num_vls;
14850                 ppd->vls_operational = ppd->vls_supported;
14851                 /* Set the default MTU. */
14852                 for (vl = 0; vl < num_vls; vl++)
14853                         dd->vld[vl].mtu = hfi1_max_mtu;
14854                 dd->vld[15].mtu = MAX_MAD_PACKET;
14855                 /*
14856                  * Set the initial values to reasonable defaults; they will
14857                  * be set for real when the link comes up.
14858                  */
14859                 ppd->overrun_threshold = 0x4;
14860                 ppd->phy_error_threshold = 0xf;
14861                 ppd->port_crc_mode_enabled = link_crc_mask;
14862                 /* initialize supported LTP CRC mode */
14863                 ppd->port_ltp_crc_mode = cap_to_port_ltp(link_crc_mask) << 8;
14864                 /* initialize enabled LTP CRC mode */
14865                 ppd->port_ltp_crc_mode |= cap_to_port_ltp(link_crc_mask) << 4;
14866                 /* start in offline */
14867                 ppd->host_link_state = HLS_DN_OFFLINE;
14868                 init_vl_arb_caches(ppd);
14869         }
14870
14871         /*
14872          * Do remaining PCIe setup and save PCIe values in dd.
14873          * Any error printing is already done by the init code.
14874          * On return, we have the chip mapped.
14875          */
14876         ret = hfi1_pcie_ddinit(dd, pdev);
14877         if (ret < 0)
14878                 goto bail_free;
14879
14880         /* Save PCI space registers to rewrite after device reset */
14881         ret = save_pci_variables(dd);
14882         if (ret < 0)
14883                 goto bail_cleanup;
14884
14885         dd->majrev = (dd->revision >> CCE_REVISION_CHIP_REV_MAJOR_SHIFT)
14886                         & CCE_REVISION_CHIP_REV_MAJOR_MASK;
14887         dd->minrev = (dd->revision >> CCE_REVISION_CHIP_REV_MINOR_SHIFT)
14888                         & CCE_REVISION_CHIP_REV_MINOR_MASK;
14889
14890         /*
14891          * Check the interrupt register mapping if the driver has no access to
14892          * the upstream component. In this case, it is likely that the driver
14893          * is running in a VM.
14894          */
14895         if (!parent) {
14896                 ret = check_int_registers(dd);
14897                 if (ret)
14898                         goto bail_cleanup;
14899         }
14900
14901         /*
14902          * obtain the hardware ID - NOT related to unit, which is a
14903          * software enumeration
14904          */
14905         reg = read_csr(dd, CCE_REVISION2);
14906         dd->hfi1_id = (reg >> CCE_REVISION2_HFI_ID_SHIFT)
14907                                         & CCE_REVISION2_HFI_ID_MASK;
14908         /* the narrower variable types drop the unwanted high bits */
14909         dd->icode = reg >> CCE_REVISION2_IMPL_CODE_SHIFT;
14910         dd->irev = reg >> CCE_REVISION2_IMPL_REVISION_SHIFT;
14911         dd_dev_info(dd, "Implementation: %s, revision 0x%x\n",
14912                     dd->icode < ARRAY_SIZE(inames) ?
14913                     inames[dd->icode] : "unknown", (int)dd->irev);
14914
14915         /* speeds the hardware can support */
14916         dd->pport->link_speed_supported = OPA_LINK_SPEED_25G;
14917         /* speeds allowed to run at */
14918         dd->pport->link_speed_enabled = dd->pport->link_speed_supported;
14919         /* give a reasonable active value, will be set on link up */
14920         dd->pport->link_speed_active = OPA_LINK_SPEED_25G;
14921
14922         /* fix up link widths for emulation _p */
14923         ppd = dd->pport;
14924         if (dd->icode == ICODE_FPGA_EMULATION && is_emulator_p(dd)) {
14925                 ppd->link_width_supported =
14926                         ppd->link_width_enabled =
14927                         ppd->link_width_downgrade_supported =
14928                         ppd->link_width_downgrade_enabled =
14929                                 OPA_LINK_WIDTH_1X;
14930         }
14931         /* ensure num_vls isn't larger than number of sdma engines */
14932         if (HFI1_CAP_IS_KSET(SDMA) && num_vls > sdma_engines) {
14933                 dd_dev_err(dd, "num_vls %u too large, using %u VLs\n",
14934                            num_vls, sdma_engines);
14935                 num_vls = sdma_engines;
14936                 ppd->vls_supported = sdma_engines;
14937                 ppd->vls_operational = ppd->vls_supported;
14938         }
14939
14940         /*
14941          * Convert the ns parameter to the 64 * cclocks used in the CSR.
14942          * Limit the max if larger than the field holds.  If timeout is
14943          * non-zero, then the calculated field will be at least 1.
14944          *
14945          * Must be after icode is set up - the cclock rate depends
14946          * on knowing the hardware being used.
14947          */
14948         dd->rcv_intr_timeout_csr = ns_to_cclock(dd, rcv_intr_timeout) / 64;
14949         if (dd->rcv_intr_timeout_csr >
14950                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK)
14951                 dd->rcv_intr_timeout_csr =
14952                         RCV_AVAIL_TIME_OUT_TIME_OUT_RELOAD_MASK;
14953         else if (dd->rcv_intr_timeout_csr == 0 && rcv_intr_timeout)
14954                 dd->rcv_intr_timeout_csr = 1;
14955
14956         /* needs to be done before we look for the peer device */
14957         read_guid(dd);
14958
14959         /* set up shared ASIC data with peer device */
14960         ret = init_asic_data(dd);
14961         if (ret)
14962                 goto bail_cleanup;
14963
14964         /* obtain chip sizes, reset chip CSRs */
14965         ret = init_chip(dd);
14966         if (ret)
14967                 goto bail_cleanup;
14968
14969         /* read in the PCIe link speed information */
14970         ret = pcie_speeds(dd);
14971         if (ret)
14972                 goto bail_cleanup;
14973
14974         /* call before get_platform_config(), after init_chip_resources() */
14975         ret = eprom_init(dd);
14976         if (ret)
14977                 goto bail_free_rcverr;
14978
14979         /* Needs to be called before hfi1_firmware_init */
14980         get_platform_config(dd);
14981
14982         /* read in firmware */
14983         ret = hfi1_firmware_init(dd);
14984         if (ret)
14985                 goto bail_cleanup;
14986
14987         /*
14988          * In general, the PCIe Gen3 transition must occur after the
14989          * chip has been idled (so it won't initiate any PCIe transactions
14990          * e.g. an interrupt) and before the driver changes any registers
14991          * (the transition will reset the registers).
14992          *
14993          * In particular, place this call after:
14994          * - init_chip()     - the chip will not initiate any PCIe transactions
14995          * - pcie_speeds()   - reads the current link speed
14996          * - hfi1_firmware_init() - the needed firmware is ready to be
14997          *                          downloaded
14998          */
14999         ret = do_pcie_gen3_transition(dd);
15000         if (ret)
15001                 goto bail_cleanup;
15002
15003         /*
15004          * This should probably occur in hfi1_pcie_init(), but historically
15005          * occurs after the do_pcie_gen3_transition() code.
15006          */
15007         tune_pcie_caps(dd);
15008
15009         /* start setting dd values and adjusting CSRs */
15010         init_early_variables(dd);
15011
15012         parse_platform_config(dd);
15013
15014         ret = obtain_boardname(dd);
15015         if (ret)
15016                 goto bail_cleanup;
15017
15018         snprintf(dd->boardversion, BOARD_VERS_MAX,
15019                  "ChipABI %u.%u, ChipRev %u.%u, SW Compat %llu\n",
15020                  HFI1_CHIP_VERS_MAJ, HFI1_CHIP_VERS_MIN,
15021                  (u32)dd->majrev,
15022                  (u32)dd->minrev,
15023                  (dd->revision >> CCE_REVISION_SW_SHIFT)
15024                     & CCE_REVISION_SW_MASK);
15025
15026         ret = set_up_context_variables(dd);
15027         if (ret)
15028                 goto bail_cleanup;
15029
15030         /* set initial RXE CSRs */
15031         ret = init_rxe(dd);
15032         if (ret)
15033                 goto bail_cleanup;
15034
15035         /* set initial TXE CSRs */
15036         init_txe(dd);
15037         /* set initial non-RXE, non-TXE CSRs */
15038         init_other(dd);
15039         /* set up KDETH QP prefix in both RX and TX CSRs */
15040         init_kdeth_qp(dd);
15041
15042         ret = hfi1_dev_affinity_init(dd);
15043         if (ret)
15044                 goto bail_cleanup;
15045
15046         /* send contexts must be set up before receive contexts */
15047         ret = init_send_contexts(dd);
15048         if (ret)
15049                 goto bail_cleanup;
15050
15051         ret = hfi1_create_kctxts(dd);
15052         if (ret)
15053                 goto bail_cleanup;
15054
15055         /*
15056          * Initialize aspm, to be done after gen3 transition and setting up
15057          * contexts and before enabling interrupts
15058          */
15059         aspm_init(dd);
15060
15061         ret = init_pervl_scs(dd);
15062         if (ret)
15063                 goto bail_cleanup;
15064
15065         /* sdma init */
15066         for (i = 0; i < dd->num_pports; ++i) {
15067                 ret = sdma_init(dd, i);
15068                 if (ret)
15069                         goto bail_cleanup;
15070         }
15071
15072         /* use contexts created by hfi1_create_kctxts */
15073         ret = set_up_interrupts(dd);
15074         if (ret)
15075                 goto bail_cleanup;
15076
15077         ret = hfi1_comp_vectors_set_up(dd);
15078         if (ret)
15079                 goto bail_clear_intr;
15080
15081         /* set up LCB access - must be after set_up_interrupts() */
15082         init_lcb_access(dd);
15083
15084         /*
15085          * Serial number is created from the base guid:
15086          * [27:24] = base guid [38:35]
15087          * [23: 0] = base guid [23: 0]
15088          */
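        /*
         * The expression below implements this: shifting base_guid right by
         * 11 moves bits [38:35] down to [27:24], and the 0xF000000 mask
         * keeps only those bits before OR-ing in bits [23:0].
         */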
15089         snprintf(dd->serial, SERIAL_MAX, "0x%08llx\n",
15090                  (dd->base_guid & 0xFFFFFF) |
15091                      ((dd->base_guid >> 11) & 0xF000000));
15092
15093         dd->oui1 = dd->base_guid >> 56 & 0xFF;
15094         dd->oui2 = dd->base_guid >> 48 & 0xFF;
15095         dd->oui3 = dd->base_guid >> 40 & 0xFF;
15096
15097         ret = load_firmware(dd); /* asymmetric with dispose_firmware() */
15098         if (ret)
15099                 goto bail_clear_intr;
15100
15101         thermal_init(dd);
15102
15103         ret = init_cntrs(dd);
15104         if (ret)
15105                 goto bail_clear_intr;
15106
15107         ret = init_rcverr(dd);
15108         if (ret)
15109                 goto bail_free_cntrs;
15110
15111         init_completion(&dd->user_comp);
15112
15113         /* The user refcount starts with one to indicate an active device */
15114         atomic_set(&dd->user_refcount, 1);
15115
15116         goto bail;
15117
15118 bail_free_rcverr:
15119         free_rcverr(dd);
15120 bail_free_cntrs:
15121         free_cntrs(dd);
15122 bail_clear_intr:
15123         hfi1_comp_vectors_clean_up(dd);
15124         msix_clean_up_interrupts(dd);
15125 bail_cleanup:
15126         hfi1_pcie_ddcleanup(dd);
15127 bail_free:
15128         hfi1_free_devdata(dd);
15129 bail:
15130         return ret;
15131 }
15132
15133 static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
15134                         u32 dw_len)
15135 {
15136         u32 delta_cycles;
15137         u32 current_egress_rate = ppd->current_egress_rate;
15138         /* rates here are in units of 10^6 bits/sec */
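        /*
         * Rough example (rates assumed): if the desired rate is half the
         * current egress rate, the packet takes about twice as many cycles
         * on the wire, and that difference is returned as the number of
         * delay cycles.
         */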
15139
15140         if (desired_egress_rate == -1)
15141                 return 0; /* shouldn't happen */
15142
15143         if (desired_egress_rate >= current_egress_rate)
15144                 return 0; /* we can't help go faster, only slower */
15145
15146         delta_cycles = egress_cycles(dw_len * 4, desired_egress_rate) -
15147                         egress_cycles(dw_len * 4, current_egress_rate);
15148
15149         return (u16)delta_cycles;
15150 }
15151
15152 /**
15153  * create_pbc - build a pbc for transmission
15154  * @flags: special case flags or-ed in built pbc
15155  * @srate_mbs: static rate, in Mb/s
15156  * @vl: the virtual lane
15157  * @dw_len: dword length (header words + data words + pbc words)
15158  *
15159  * Create a PBC with the given flags, rate, VL, and length.
15160  *
15161  * NOTE: The PBC created will not insert any HCRC - all callers but one are
15162  * for verbs, which does not use this PSM feature.  The lone other caller
15163  * is for the diagnostic interface which calls this if the user does not
15164  * supply their own PBC.
15165  */
15166 u64 create_pbc(struct hfi1_pportdata *ppd, u64 flags, int srate_mbs, u32 vl,
15167                u32 dw_len)
15168 {
15169         u64 pbc, delay = 0;
15170
15171         if (unlikely(srate_mbs))
15172                 delay = delay_cycles(ppd, srate_mbs, dw_len);
15173
15174         pbc = flags
15175                 | (delay << PBC_STATIC_RATE_CONTROL_COUNT_SHIFT)
15176                 | ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
15177                 | (vl & PBC_VL_MASK) << PBC_VL_SHIFT
15178                 | (dw_len & PBC_LENGTH_DWS_MASK)
15179                         << PBC_LENGTH_DWS_SHIFT;
15180
15181         return pbc;
15182 }
15183
15184 #define SBUS_THERMAL    0x4f
15185 #define SBUS_THERM_MONITOR_MODE 0x1
15186
15187 #define THERM_FAILURE(dev, ret, reason) \
15188         dd_dev_err((dd),                                                \
15189                    "Thermal sensor initialization failed: %s (%d)\n",   \
15190                    (reason), (ret))
15191
15192 /*
15193  * Initialize the thermal sensor.
15194  *
15195  * After initialization, enable polling of thermal sensor through
15196  * SBus interface. For this to work, the SBus Master firmware must be
15197  * loaded, because the HW polling logic uses SBus interrupts, which are
15198  * not supported by the default firmware. Otherwise, no data will be
15199  * returned through
15200  * the ASIC_STS_THERM CSR.
15201  */
15202 static int thermal_init(struct hfi1_devdata *dd)
15203 {
15204         int ret = 0;
15205
15206         if (dd->icode != ICODE_RTL_SILICON ||
15207             check_chip_resource(dd, CR_THERM_INIT, NULL))
15208                 return ret;
15209
15210         ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
15211         if (ret) {
15212                 THERM_FAILURE(dd, ret, "Acquire SBus");
15213                 return ret;
15214         }
15215
15216         dd_dev_info(dd, "Initializing thermal sensor\n");
15217         /* Disable polling of thermal readings */
15218         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x0);
15219         msleep(100);
15220         /* Thermal Sensor Initialization */
15221         /*    Step 1: Reset the Thermal SBus Receiver */
15222         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15223                                 RESET_SBUS_RECEIVER, 0);
15224         if (ret) {
15225                 THERM_FAILURE(dd, ret, "Bus Reset");
15226                 goto done;
15227         }
15228         /*    Step 2: Set Reset bit in Thermal block */
15229         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15230                                 WRITE_SBUS_RECEIVER, 0x1);
15231         if (ret) {
15232                 THERM_FAILURE(dd, ret, "Therm Block Reset");
15233                 goto done;
15234         }
15235         /*    Step 3: Write clock divider value (100MHz -> 2MHz) */
15236         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x1,
15237                                 WRITE_SBUS_RECEIVER, 0x32);
15238         if (ret) {
15239                 THERM_FAILURE(dd, ret, "Write Clock Div");
15240                 goto done;
15241         }
15242         /*    Step 4: Select temperature mode */
15243         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x3,
15244                                 WRITE_SBUS_RECEIVER,
15245                                 SBUS_THERM_MONITOR_MODE);
15246         if (ret) {
15247                 THERM_FAILURE(dd, ret, "Write Mode Sel");
15248                 goto done;
15249         }
15250         /*    Step 5: De-assert block reset and start conversion */
15251         ret = sbus_request_slow(dd, SBUS_THERMAL, 0x0,
15252                                 WRITE_SBUS_RECEIVER, 0x2);
15253         if (ret) {
15254                 THERM_FAILURE(dd, ret, "Write Reset Deassert");
15255                 goto done;
15256         }
15257         /*    Step 5.1: Wait for first conversion (21.5ms per spec) */
15258         msleep(22);
15259
15260         /* Enable polling of thermal readings */
15261         write_csr(dd, ASIC_CFG_THERM_POLL_EN, 0x1);
15262
15263         /* Set initialized flag */
15264         ret = acquire_chip_resource(dd, CR_THERM_INIT, 0);
15265         if (ret)
15266                 THERM_FAILURE(dd, ret, "Unable to set thermal init flag");
15267
15268 done:
15269         release_chip_resource(dd, CR_SBUS);
15270         return ret;
15271 }
15272
15273 static void handle_temp_err(struct hfi1_devdata *dd)
15274 {
15275         struct hfi1_pportdata *ppd = &dd->pport[0];
15276         /*
15277          * Thermal Critical Interrupt
15278          * Put the device into forced freeze mode, take link down to
15279          * offline, and put DC into reset.
15280          */
15281         dd_dev_emerg(dd,
15282                      "Critical temperature reached! Forcing device into freeze mode!\n");
15283         dd->flags |= HFI1_FORCED_FREEZE;
15284         start_freeze_handling(ppd, FREEZE_SELF | FREEZE_ABORT);
15285         /*
15286          * Shut DC down as much and as quickly as possible.
15287          *
15288          * Step 1: Take the link down to OFFLINE. This will cause the
15289          *         8051 to put the Serdes in reset. However, we don't want to
15290          *         go through the entire link state machine since we want to
15291          *         shutdown ASAP. Furthermore, this is not a graceful shutdown
15292          *         but rather an attempt to save the chip.
15293          *         Code below is almost the same as quiet_serdes() but avoids
15294          *         all the extra work and the sleeps.
15295          */
15296         ppd->driver_link_ready = 0;
15297         ppd->link_enabled = 0;
15298         set_physical_link_state(dd, (OPA_LINKDOWN_REASON_SMA_DISABLED << 8) |
15299                                 PLS_OFFLINE);
15300         /*
15301          * Step 2: Shutdown LCB and 8051
15302          *         After shutdown, do not restore DC_CFG_RESET value.
15303          */
15304         dc_shutdown(dd);
15305 }