Merge tag 'gemini-dts-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw...
[sfrench/cifs-2.6.git] / drivers / scsi / hisi_sas / hisi_sas_v3_hw.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (c) 2017 Hisilicon Limited.
4  */
5
6 #include "hisi_sas.h"
7 #define DRV_NAME "hisi_sas_v3_hw"
8
9 /* global registers need init */
10 #define DLVRY_QUEUE_ENABLE              0x0
11 #define IOST_BASE_ADDR_LO               0x8
12 #define IOST_BASE_ADDR_HI               0xc
13 #define ITCT_BASE_ADDR_LO               0x10
14 #define ITCT_BASE_ADDR_HI               0x14
15 #define IO_BROKEN_MSG_ADDR_LO           0x18
16 #define IO_BROKEN_MSG_ADDR_HI           0x1c
17 #define PHY_CONTEXT                     0x20
18 #define PHY_STATE                       0x24
19 #define PHY_PORT_NUM_MA                 0x28
20 #define PHY_CONN_RATE                   0x30
21 #define ITCT_CLR                        0x44
22 #define ITCT_CLR_EN_OFF                 16
23 #define ITCT_CLR_EN_MSK                 (0x1 << ITCT_CLR_EN_OFF)
24 #define ITCT_DEV_OFF                    0
25 #define ITCT_DEV_MSK                    (0x7ff << ITCT_DEV_OFF)
26 #define IO_SATA_BROKEN_MSG_ADDR_LO      0x58
27 #define IO_SATA_BROKEN_MSG_ADDR_HI      0x5c
28 #define SATA_INITI_D2H_STORE_ADDR_LO    0x60
29 #define SATA_INITI_D2H_STORE_ADDR_HI    0x64
30 #define CFG_MAX_TAG                     0x68
31 #define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL 0x84
32 #define HGC_SAS_TXFAIL_RETRY_CTRL       0x88
33 #define HGC_GET_ITV_TIME                0x90
34 #define DEVICE_MSG_WORK_MODE            0x94
35 #define OPENA_WT_CONTI_TIME             0x9c
36 #define I_T_NEXUS_LOSS_TIME             0xa0
37 #define MAX_CON_TIME_LIMIT_TIME         0xa4
38 #define BUS_INACTIVE_LIMIT_TIME         0xa8
39 #define REJECT_TO_OPEN_LIMIT_TIME       0xac
40 #define CQ_INT_CONVERGE_EN              0xb0
41 #define CFG_AGING_TIME                  0xbc
42 #define HGC_DFX_CFG2                    0xc0
43 #define CFG_ABT_SET_QUERY_IPTT  0xd4
44 #define CFG_SET_ABORTED_IPTT_OFF        0
45 #define CFG_SET_ABORTED_IPTT_MSK        (0xfff << CFG_SET_ABORTED_IPTT_OFF)
46 #define CFG_SET_ABORTED_EN_OFF  12
47 #define CFG_ABT_SET_IPTT_DONE   0xd8
48 #define CFG_ABT_SET_IPTT_DONE_OFF       0
49 #define HGC_IOMB_PROC1_STATUS   0x104
50 #define HGC_LM_DFX_STATUS2              0x128
51 #define HGC_LM_DFX_STATUS2_IOSTLIST_OFF         0
52 #define HGC_LM_DFX_STATUS2_IOSTLIST_MSK (0xfff << \
53                                          HGC_LM_DFX_STATUS2_IOSTLIST_OFF)
54 #define HGC_LM_DFX_STATUS2_ITCTLIST_OFF         12
55 #define HGC_LM_DFX_STATUS2_ITCTLIST_MSK (0x7ff << \
56                                          HGC_LM_DFX_STATUS2_ITCTLIST_OFF)
57 #define HGC_CQE_ECC_ADDR                0x13c
58 #define HGC_CQE_ECC_1B_ADDR_OFF 0
59 #define HGC_CQE_ECC_1B_ADDR_MSK (0x3f << HGC_CQE_ECC_1B_ADDR_OFF)
60 #define HGC_CQE_ECC_MB_ADDR_OFF 8
61 #define HGC_CQE_ECC_MB_ADDR_MSK (0x3f << HGC_CQE_ECC_MB_ADDR_OFF)
62 #define HGC_IOST_ECC_ADDR               0x140
63 #define HGC_IOST_ECC_1B_ADDR_OFF        0
64 #define HGC_IOST_ECC_1B_ADDR_MSK        (0x3ff << HGC_IOST_ECC_1B_ADDR_OFF)
65 #define HGC_IOST_ECC_MB_ADDR_OFF        16
66 #define HGC_IOST_ECC_MB_ADDR_MSK        (0x3ff << HGC_IOST_ECC_MB_ADDR_OFF)
67 #define HGC_DQE_ECC_ADDR                0x144
68 #define HGC_DQE_ECC_1B_ADDR_OFF 0
69 #define HGC_DQE_ECC_1B_ADDR_MSK (0xfff << HGC_DQE_ECC_1B_ADDR_OFF)
70 #define HGC_DQE_ECC_MB_ADDR_OFF 16
71 #define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF)
72 #define CHNL_INT_STATUS                 0x148
73 #define HGC_ITCT_ECC_ADDR               0x150
74 #define HGC_ITCT_ECC_1B_ADDR_OFF                0
75 #define HGC_ITCT_ECC_1B_ADDR_MSK                (0x3ff << \
76                                                  HGC_ITCT_ECC_1B_ADDR_OFF)
77 #define HGC_ITCT_ECC_MB_ADDR_OFF                16
78 #define HGC_ITCT_ECC_MB_ADDR_MSK                (0x3ff << \
79                                                  HGC_ITCT_ECC_MB_ADDR_OFF)
80 #define HGC_AXI_FIFO_ERR_INFO  0x154
81 #define AXI_ERR_INFO_OFF               0
82 #define AXI_ERR_INFO_MSK               (0xff << AXI_ERR_INFO_OFF)
83 #define FIFO_ERR_INFO_OFF              8
84 #define FIFO_ERR_INFO_MSK              (0xff << FIFO_ERR_INFO_OFF)
85 #define INT_COAL_EN                     0x19c
86 #define OQ_INT_COAL_TIME                0x1a0
87 #define OQ_INT_COAL_CNT                 0x1a4
88 #define ENT_INT_COAL_TIME               0x1a8
89 #define ENT_INT_COAL_CNT                0x1ac
90 #define OQ_INT_SRC                      0x1b0
91 #define OQ_INT_SRC_MSK                  0x1b4
92 #define ENT_INT_SRC1                    0x1b8
93 #define ENT_INT_SRC1_D2H_FIS_CH0_OFF    0
94 #define ENT_INT_SRC1_D2H_FIS_CH0_MSK    (0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
95 #define ENT_INT_SRC1_D2H_FIS_CH1_OFF    8
96 #define ENT_INT_SRC1_D2H_FIS_CH1_MSK    (0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
97 #define ENT_INT_SRC2                    0x1bc
98 #define ENT_INT_SRC3                    0x1c0
99 #define ENT_INT_SRC3_WP_DEPTH_OFF               8
100 #define ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF      9
101 #define ENT_INT_SRC3_RP_DEPTH_OFF               10
102 #define ENT_INT_SRC3_AXI_OFF                    11
103 #define ENT_INT_SRC3_FIFO_OFF                   12
104 #define ENT_INT_SRC3_LM_OFF                             14
105 #define ENT_INT_SRC3_ITC_INT_OFF        15
106 #define ENT_INT_SRC3_ITC_INT_MSK        (0x1 << ENT_INT_SRC3_ITC_INT_OFF)
107 #define ENT_INT_SRC3_ABT_OFF            16
108 #define ENT_INT_SRC3_DQE_POISON_OFF     18
109 #define ENT_INT_SRC3_IOST_POISON_OFF    19
110 #define ENT_INT_SRC3_ITCT_POISON_OFF    20
111 #define ENT_INT_SRC3_ITCT_NCQ_POISON_OFF        21
112 #define ENT_INT_SRC_MSK1                0x1c4
113 #define ENT_INT_SRC_MSK2                0x1c8
114 #define ENT_INT_SRC_MSK3                0x1cc
115 #define ENT_INT_SRC_MSK3_ENT95_MSK_OFF  31
116 #define CHNL_PHYUPDOWN_INT_MSK          0x1d0
117 #define CHNL_ENT_INT_MSK                        0x1d4
118 #define HGC_COM_INT_MSK                         0x1d8
119 #define ENT_INT_SRC_MSK3_ENT95_MSK_MSK  (0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
120 #define SAS_ECC_INTR                    0x1e8
121 #define SAS_ECC_INTR_DQE_ECC_1B_OFF             0
122 #define SAS_ECC_INTR_DQE_ECC_MB_OFF             1
123 #define SAS_ECC_INTR_IOST_ECC_1B_OFF    2
124 #define SAS_ECC_INTR_IOST_ECC_MB_OFF    3
125 #define SAS_ECC_INTR_ITCT_ECC_1B_OFF    4
126 #define SAS_ECC_INTR_ITCT_ECC_MB_OFF    5
127 #define SAS_ECC_INTR_ITCTLIST_ECC_1B_OFF        6
128 #define SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF        7
129 #define SAS_ECC_INTR_IOSTLIST_ECC_1B_OFF        8
130 #define SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF        9
131 #define SAS_ECC_INTR_CQE_ECC_1B_OFF             10
132 #define SAS_ECC_INTR_CQE_ECC_MB_OFF             11
133 #define SAS_ECC_INTR_NCQ_MEM0_ECC_1B_OFF        12
134 #define SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF        13
135 #define SAS_ECC_INTR_NCQ_MEM1_ECC_1B_OFF        14
136 #define SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF        15
137 #define SAS_ECC_INTR_NCQ_MEM2_ECC_1B_OFF        16
138 #define SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF        17
139 #define SAS_ECC_INTR_NCQ_MEM3_ECC_1B_OFF        18
140 #define SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF        19
141 #define SAS_ECC_INTR_OOO_RAM_ECC_1B_OFF         20
142 #define SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF         21
143 #define SAS_ECC_INTR_MSK                0x1ec
144 #define HGC_ERR_STAT_EN                 0x238
145 #define CQE_SEND_CNT                    0x248
146 #define DLVRY_Q_0_BASE_ADDR_LO          0x260
147 #define DLVRY_Q_0_BASE_ADDR_HI          0x264
148 #define DLVRY_Q_0_DEPTH                 0x268
149 #define DLVRY_Q_0_WR_PTR                0x26c
150 #define DLVRY_Q_0_RD_PTR                0x270
151 #define HYPER_STREAM_ID_EN_CFG          0xc80
152 #define OQ0_INT_SRC_MSK                 0xc90
153 #define COMPL_Q_0_BASE_ADDR_LO          0x4e0
154 #define COMPL_Q_0_BASE_ADDR_HI          0x4e4
155 #define COMPL_Q_0_DEPTH                 0x4e8
156 #define COMPL_Q_0_WR_PTR                0x4ec
157 #define COMPL_Q_0_RD_PTR                0x4f0
158 #define HGC_RXM_DFX_STATUS14            0xae8
159 #define HGC_RXM_DFX_STATUS14_MEM0_OFF   0
160 #define HGC_RXM_DFX_STATUS14_MEM0_MSK   (0x1ff << \
161                                          HGC_RXM_DFX_STATUS14_MEM0_OFF)
162 #define HGC_RXM_DFX_STATUS14_MEM1_OFF   9
163 #define HGC_RXM_DFX_STATUS14_MEM1_MSK   (0x1ff << \
164                                          HGC_RXM_DFX_STATUS14_MEM1_OFF)
165 #define HGC_RXM_DFX_STATUS14_MEM2_OFF   18
166 #define HGC_RXM_DFX_STATUS14_MEM2_MSK   (0x1ff << \
167                                          HGC_RXM_DFX_STATUS14_MEM2_OFF)
168 #define HGC_RXM_DFX_STATUS15            0xaec
169 #define HGC_RXM_DFX_STATUS15_MEM3_OFF   0
170 #define HGC_RXM_DFX_STATUS15_MEM3_MSK   (0x1ff << \
171                                          HGC_RXM_DFX_STATUS15_MEM3_OFF)
172 #define AWQOS_AWCACHE_CFG       0xc84
173 #define ARQOS_ARCACHE_CFG       0xc88
174 #define HILINK_ERR_DFX          0xe04
175 #define SAS_GPIO_CFG_0          0x1000
176 #define SAS_GPIO_CFG_1          0x1004
177 #define SAS_GPIO_TX_0_1 0x1040
178 #define SAS_CFG_DRIVE_VLD       0x1070
179
180 /* phy registers requiring init */
181 #define PORT_BASE                       (0x2000)
182 #define PHY_CFG                         (PORT_BASE + 0x0)
183 #define HARD_PHY_LINKRATE               (PORT_BASE + 0x4)
184 #define PHY_CFG_ENA_OFF                 0
185 #define PHY_CFG_ENA_MSK                 (0x1 << PHY_CFG_ENA_OFF)
186 #define PHY_CFG_DC_OPT_OFF              2
187 #define PHY_CFG_DC_OPT_MSK              (0x1 << PHY_CFG_DC_OPT_OFF)
188 #define PHY_CFG_PHY_RST_OFF             3
189 #define PHY_CFG_PHY_RST_MSK             (0x1 << PHY_CFG_PHY_RST_OFF)
190 #define PROG_PHY_LINK_RATE              (PORT_BASE + 0x8)
191 #define PHY_CTRL                        (PORT_BASE + 0x14)
192 #define PHY_CTRL_RESET_OFF              0
193 #define PHY_CTRL_RESET_MSK              (0x1 << PHY_CTRL_RESET_OFF)
194 #define CMD_HDR_PIR_OFF                 8
195 #define CMD_HDR_PIR_MSK                 (0x1 << CMD_HDR_PIR_OFF)
196 #define SERDES_CFG                      (PORT_BASE + 0x1c)
197 #define SL_CFG                          (PORT_BASE + 0x84)
198 #define AIP_LIMIT                       (PORT_BASE + 0x90)
199 #define SL_CONTROL                      (PORT_BASE + 0x94)
200 #define SL_CONTROL_NOTIFY_EN_OFF        0
201 #define SL_CONTROL_NOTIFY_EN_MSK        (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
202 #define SL_CTA_OFF              17
203 #define SL_CTA_MSK              (0x1 << SL_CTA_OFF)
204 #define RX_PRIMS_STATUS                 (PORT_BASE + 0x98)
205 #define RX_BCAST_CHG_OFF                1
206 #define RX_BCAST_CHG_MSK                (0x1 << RX_BCAST_CHG_OFF)
207 #define TX_ID_DWORD0                    (PORT_BASE + 0x9c)
208 #define TX_ID_DWORD1                    (PORT_BASE + 0xa0)
209 #define TX_ID_DWORD2                    (PORT_BASE + 0xa4)
210 #define TX_ID_DWORD3                    (PORT_BASE + 0xa8)
211 #define TX_ID_DWORD4                    (PORT_BASE + 0xaC)
212 #define TX_ID_DWORD5                    (PORT_BASE + 0xb0)
213 #define TX_ID_DWORD6                    (PORT_BASE + 0xb4)
214 #define TXID_AUTO                               (PORT_BASE + 0xb8)
215 #define CT3_OFF         1
216 #define CT3_MSK         (0x1 << CT3_OFF)
217 #define TX_HARDRST_OFF          2
218 #define TX_HARDRST_MSK          (0x1 << TX_HARDRST_OFF)
219 #define RX_IDAF_DWORD0                  (PORT_BASE + 0xc4)
220 #define RXOP_CHECK_CFG_H                (PORT_BASE + 0xfc)
221 #define STP_LINK_TIMER                  (PORT_BASE + 0x120)
222 #define STP_LINK_TIMEOUT_STATE          (PORT_BASE + 0x124)
223 #define CON_CFG_DRIVER                  (PORT_BASE + 0x130)
224 #define SAS_SSP_CON_TIMER_CFG           (PORT_BASE + 0x134)
225 #define SAS_SMP_CON_TIMER_CFG           (PORT_BASE + 0x138)
226 #define SAS_STP_CON_TIMER_CFG           (PORT_BASE + 0x13c)
227 #define CHL_INT0                        (PORT_BASE + 0x1b4)
228 #define CHL_INT0_HOTPLUG_TOUT_OFF       0
229 #define CHL_INT0_HOTPLUG_TOUT_MSK       (0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
230 #define CHL_INT0_SL_RX_BCST_ACK_OFF     1
231 #define CHL_INT0_SL_RX_BCST_ACK_MSK     (0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
232 #define CHL_INT0_SL_PHY_ENABLE_OFF      2
233 #define CHL_INT0_SL_PHY_ENABLE_MSK      (0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
234 #define CHL_INT0_NOT_RDY_OFF            4
235 #define CHL_INT0_NOT_RDY_MSK            (0x1 << CHL_INT0_NOT_RDY_OFF)
236 #define CHL_INT0_PHY_RDY_OFF            5
237 #define CHL_INT0_PHY_RDY_MSK            (0x1 << CHL_INT0_PHY_RDY_OFF)
238 #define CHL_INT1                        (PORT_BASE + 0x1b8)
239 #define CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF 15
240 #define CHL_INT1_DMAC_TX_ECC_1B_ERR_OFF 16
241 #define CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF 17
242 #define CHL_INT1_DMAC_RX_ECC_1B_ERR_OFF 18
243 #define CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF 19
244 #define CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF 20
245 #define CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF 21
246 #define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
247 #define CHL_INT1_DMAC_TX_FIFO_ERR_OFF   23
248 #define CHL_INT1_DMAC_RX_FIFO_ERR_OFF   24
249 #define CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF      26
250 #define CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF      27
251 #define CHL_INT2                        (PORT_BASE + 0x1bc)
252 #define CHL_INT2_SL_IDAF_TOUT_CONF_OFF  0
253 #define CHL_INT2_RX_DISP_ERR_OFF        28
254 #define CHL_INT2_RX_CODE_ERR_OFF        29
255 #define CHL_INT2_RX_INVLD_DW_OFF        30
256 #define CHL_INT2_STP_LINK_TIMEOUT_OFF   31
257 #define CHL_INT0_MSK                    (PORT_BASE + 0x1c0)
258 #define CHL_INT1_MSK                    (PORT_BASE + 0x1c4)
259 #define CHL_INT2_MSK                    (PORT_BASE + 0x1c8)
260 #define SAS_EC_INT_COAL_TIME            (PORT_BASE + 0x1cc)
261 #define CHL_INT_COAL_EN                 (PORT_BASE + 0x1d0)
262 #define SAS_RX_TRAIN_TIMER              (PORT_BASE + 0x2a4)
263 #define PHY_CTRL_RDY_MSK                (PORT_BASE + 0x2b0)
264 #define PHYCTRL_NOT_RDY_MSK             (PORT_BASE + 0x2b4)
265 #define PHYCTRL_DWS_RESET_MSK           (PORT_BASE + 0x2b8)
266 #define PHYCTRL_PHY_ENA_MSK             (PORT_BASE + 0x2bc)
267 #define SL_RX_BCAST_CHK_MSK             (PORT_BASE + 0x2c0)
268 #define PHYCTRL_OOB_RESTART_MSK         (PORT_BASE + 0x2c4)
269 #define DMA_TX_STATUS                   (PORT_BASE + 0x2d0)
270 #define DMA_TX_STATUS_BUSY_OFF          0
271 #define DMA_TX_STATUS_BUSY_MSK          (0x1 << DMA_TX_STATUS_BUSY_OFF)
272 #define DMA_RX_STATUS                   (PORT_BASE + 0x2e8)
273 #define DMA_RX_STATUS_BUSY_OFF          0
274 #define DMA_RX_STATUS_BUSY_MSK          (0x1 << DMA_RX_STATUS_BUSY_OFF)
275
276 #define COARSETUNE_TIME                 (PORT_BASE + 0x304)
277 #define ERR_CNT_DWS_LOST                (PORT_BASE + 0x380)
278 #define ERR_CNT_RESET_PROB              (PORT_BASE + 0x384)
279 #define ERR_CNT_INVLD_DW                (PORT_BASE + 0x390)
280 #define ERR_CNT_CODE_ERR                (PORT_BASE + 0x394)
281 #define ERR_CNT_DISP_ERR                (PORT_BASE + 0x398)
282
283 #define DEFAULT_ITCT_HW         2048 /* reset value, not reprogrammed */
284 #if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
285 #error Max ITCT exceeded
286 #endif
287
288 #define AXI_MASTER_CFG_BASE             (0x5000)
289 #define AM_CTRL_GLOBAL                  (0x0)
290 #define AM_CTRL_SHUTDOWN_REQ_OFF        0
291 #define AM_CTRL_SHUTDOWN_REQ_MSK        (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF)
292 #define AM_CURR_TRANS_RETURN    (0x150)
293
294 #define AM_CFG_MAX_TRANS                (0x5010)
295 #define AM_CFG_SINGLE_PORT_MAX_TRANS    (0x5014)
296 #define AXI_CFG                                 (0x5100)
297 #define AM_ROB_ECC_ERR_ADDR             (0x510c)
298 #define AM_ROB_ECC_ERR_ADDR_OFF 0
299 #define AM_ROB_ECC_ERR_ADDR_MSK 0xffffffff
300
301 /* RAS registers need init */
302 #define RAS_BASE                (0x6000)
303 #define SAS_RAS_INTR0                   (RAS_BASE)
304 #define SAS_RAS_INTR1                   (RAS_BASE + 0x04)
305 #define SAS_RAS_INTR0_MASK              (RAS_BASE + 0x08)
306 #define SAS_RAS_INTR1_MASK              (RAS_BASE + 0x0c)
307 #define CFG_SAS_RAS_INTR_MASK           (RAS_BASE + 0x1c)
308 #define SAS_RAS_INTR2                   (RAS_BASE + 0x20)
309 #define SAS_RAS_INTR2_MASK              (RAS_BASE + 0x24)
310
311 /* HW dma structures */
312 /* Delivery queue header */
313 /* dw0 */
314 #define CMD_HDR_ABORT_FLAG_OFF          0
315 #define CMD_HDR_ABORT_FLAG_MSK          (0x3 << CMD_HDR_ABORT_FLAG_OFF)
316 #define CMD_HDR_ABORT_DEVICE_TYPE_OFF   2
317 #define CMD_HDR_ABORT_DEVICE_TYPE_MSK   (0x1 << CMD_HDR_ABORT_DEVICE_TYPE_OFF)
318 #define CMD_HDR_RESP_REPORT_OFF         5
319 #define CMD_HDR_RESP_REPORT_MSK         (0x1 << CMD_HDR_RESP_REPORT_OFF)
320 #define CMD_HDR_TLR_CTRL_OFF            6
321 #define CMD_HDR_TLR_CTRL_MSK            (0x3 << CMD_HDR_TLR_CTRL_OFF)
322 #define CMD_HDR_PORT_OFF                18
323 #define CMD_HDR_PORT_MSK                (0xf << CMD_HDR_PORT_OFF)
324 #define CMD_HDR_PRIORITY_OFF            27
325 #define CMD_HDR_PRIORITY_MSK            (0x1 << CMD_HDR_PRIORITY_OFF)
326 #define CMD_HDR_CMD_OFF                 29
327 #define CMD_HDR_CMD_MSK                 (0x7 << CMD_HDR_CMD_OFF)
328 /* dw1 */
329 #define CMD_HDR_UNCON_CMD_OFF   3
330 #define CMD_HDR_DIR_OFF                 5
331 #define CMD_HDR_DIR_MSK                 (0x3 << CMD_HDR_DIR_OFF)
332 #define CMD_HDR_RESET_OFF               7
333 #define CMD_HDR_RESET_MSK               (0x1 << CMD_HDR_RESET_OFF)
334 #define CMD_HDR_VDTL_OFF                10
335 #define CMD_HDR_VDTL_MSK                (0x1 << CMD_HDR_VDTL_OFF)
336 #define CMD_HDR_FRAME_TYPE_OFF          11
337 #define CMD_HDR_FRAME_TYPE_MSK          (0x1f << CMD_HDR_FRAME_TYPE_OFF)
338 #define CMD_HDR_DEV_ID_OFF              16
339 #define CMD_HDR_DEV_ID_MSK              (0xffff << CMD_HDR_DEV_ID_OFF)
340 /* dw2 */
341 #define CMD_HDR_CFL_OFF                 0
342 #define CMD_HDR_CFL_MSK                 (0x1ff << CMD_HDR_CFL_OFF)
343 #define CMD_HDR_NCQ_TAG_OFF             10
344 #define CMD_HDR_NCQ_TAG_MSK             (0x1f << CMD_HDR_NCQ_TAG_OFF)
345 #define CMD_HDR_MRFL_OFF                15
346 #define CMD_HDR_MRFL_MSK                (0x1ff << CMD_HDR_MRFL_OFF)
347 #define CMD_HDR_SG_MOD_OFF              24
348 #define CMD_HDR_SG_MOD_MSK              (0x3 << CMD_HDR_SG_MOD_OFF)
349 /* dw3 */
350 #define CMD_HDR_IPTT_OFF                0
351 #define CMD_HDR_IPTT_MSK                (0xffff << CMD_HDR_IPTT_OFF)
352 /* dw6 */
353 #define CMD_HDR_DIF_SGL_LEN_OFF         0
354 #define CMD_HDR_DIF_SGL_LEN_MSK         (0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
355 #define CMD_HDR_DATA_SGL_LEN_OFF        16
356 #define CMD_HDR_DATA_SGL_LEN_MSK        (0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
357 /* dw7 */
358 #define CMD_HDR_ADDR_MODE_SEL_OFF               15
359 #define CMD_HDR_ADDR_MODE_SEL_MSK               (1 << CMD_HDR_ADDR_MODE_SEL_OFF)
360 #define CMD_HDR_ABORT_IPTT_OFF          16
361 #define CMD_HDR_ABORT_IPTT_MSK          (0xffff << CMD_HDR_ABORT_IPTT_OFF)
362
363 /* Completion header */
364 /* dw0 */
365 #define CMPLT_HDR_CMPLT_OFF             0
366 #define CMPLT_HDR_CMPLT_MSK             (0x3 << CMPLT_HDR_CMPLT_OFF)
367 #define CMPLT_HDR_ERROR_PHASE_OFF   2
368 #define CMPLT_HDR_ERROR_PHASE_MSK   (0xff << CMPLT_HDR_ERROR_PHASE_OFF)
369 #define CMPLT_HDR_RSPNS_XFRD_OFF        10
370 #define CMPLT_HDR_RSPNS_XFRD_MSK        (0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
371 #define CMPLT_HDR_ERX_OFF               12
372 #define CMPLT_HDR_ERX_MSK               (0x1 << CMPLT_HDR_ERX_OFF)
373 #define CMPLT_HDR_ABORT_STAT_OFF        13
374 #define CMPLT_HDR_ABORT_STAT_MSK        (0x7 << CMPLT_HDR_ABORT_STAT_OFF)
375 /* abort_stat */
376 #define STAT_IO_NOT_VALID               0x1
377 #define STAT_IO_NO_DEVICE               0x2
378 #define STAT_IO_COMPLETE                0x3
379 #define STAT_IO_ABORTED                 0x4
380 /* dw1 */
381 #define CMPLT_HDR_IPTT_OFF              0
382 #define CMPLT_HDR_IPTT_MSK              (0xffff << CMPLT_HDR_IPTT_OFF)
383 #define CMPLT_HDR_DEV_ID_OFF            16
384 #define CMPLT_HDR_DEV_ID_MSK            (0xffff << CMPLT_HDR_DEV_ID_OFF)
385 /* dw3 */
386 #define CMPLT_HDR_IO_IN_TARGET_OFF      17
387 #define CMPLT_HDR_IO_IN_TARGET_MSK      (0x1 << CMPLT_HDR_IO_IN_TARGET_OFF)
388
389 /* ITCT header */
390 /* qw0 */
391 #define ITCT_HDR_DEV_TYPE_OFF           0
392 #define ITCT_HDR_DEV_TYPE_MSK           (0x3 << ITCT_HDR_DEV_TYPE_OFF)
393 #define ITCT_HDR_VALID_OFF              2
394 #define ITCT_HDR_VALID_MSK              (0x1 << ITCT_HDR_VALID_OFF)
395 #define ITCT_HDR_MCR_OFF                5
396 #define ITCT_HDR_MCR_MSK                (0xf << ITCT_HDR_MCR_OFF)
397 #define ITCT_HDR_VLN_OFF                9
398 #define ITCT_HDR_VLN_MSK                (0xf << ITCT_HDR_VLN_OFF)
399 #define ITCT_HDR_SMP_TIMEOUT_OFF        16
400 #define ITCT_HDR_AWT_CONTINUE_OFF       25
401 #define ITCT_HDR_PORT_ID_OFF            28
402 #define ITCT_HDR_PORT_ID_MSK            (0xf << ITCT_HDR_PORT_ID_OFF)
403 /* qw2 */
404 #define ITCT_HDR_INLT_OFF               0
405 #define ITCT_HDR_INLT_MSK               (0xffffULL << ITCT_HDR_INLT_OFF)
406 #define ITCT_HDR_RTOLT_OFF              48
407 #define ITCT_HDR_RTOLT_MSK              (0xffffULL << ITCT_HDR_RTOLT_OFF)
408
409 struct hisi_sas_protect_iu_v3_hw {
410         u32 dw0;
411         u32 lbrtcv;
412         u32 lbrtgv;
413         u32 dw3;
414         u32 dw4;
415         u32 dw5;
416         u32 rsv;
417 };
418
419 struct hisi_sas_complete_v3_hdr {
420         __le32 dw0;
421         __le32 dw1;
422         __le32 act;
423         __le32 dw3;
424 };
425
426 struct hisi_sas_err_record_v3 {
427         /* dw0 */
428         __le32 trans_tx_fail_type;
429
430         /* dw1 */
431         __le32 trans_rx_fail_type;
432
433         /* dw2 */
434         __le16 dma_tx_err_type;
435         __le16 sipc_rx_err_type;
436
437         /* dw3 */
438         __le32 dma_rx_err_type;
439 };
440
441 #define RX_DATA_LEN_UNDERFLOW_OFF       6
442 #define RX_DATA_LEN_UNDERFLOW_MSK       (1 << RX_DATA_LEN_UNDERFLOW_OFF)
443
444 #define HISI_SAS_COMMAND_ENTRIES_V3_HW 4096
445 #define HISI_SAS_MSI_COUNT_V3_HW 32
446
447 #define DIR_NO_DATA 0
448 #define DIR_TO_INI 1
449 #define DIR_TO_DEVICE 2
450 #define DIR_RESERVED 3
451
452 #define FIS_CMD_IS_UNCONSTRAINED(fis) \
453         ((fis.command == ATA_CMD_READ_LOG_EXT) || \
454         (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \
455         ((fis.command == ATA_CMD_DEV_RESET) && \
456         ((fis.control & ATA_SRST) != 0)))
457
458 #define T10_INSRT_EN_OFF    0
459 #define T10_INSRT_EN_MSK    (1 << T10_INSRT_EN_OFF)
460 #define T10_RMV_EN_OFF      1
461 #define T10_RMV_EN_MSK      (1 << T10_RMV_EN_OFF)
462 #define T10_RPLC_EN_OFF     2
463 #define T10_RPLC_EN_MSK     (1 << T10_RPLC_EN_OFF)
464 #define T10_CHK_EN_OFF      3
465 #define T10_CHK_EN_MSK      (1 << T10_CHK_EN_OFF)
466 #define INCR_LBRT_OFF       5
467 #define INCR_LBRT_MSK       (1 << INCR_LBRT_OFF)
468 #define USR_DATA_BLOCK_SZ_OFF   20
469 #define USR_DATA_BLOCK_SZ_MSK   (0x3 << USR_DATA_BLOCK_SZ_OFF)
470 #define T10_CHK_MSK_OFF     16
471 #define T10_CHK_REF_TAG_MSK (0xf0 << T10_CHK_MSK_OFF)
472 #define T10_CHK_APP_TAG_MSK (0xc << T10_CHK_MSK_OFF)
473
474 #define BASE_VECTORS_V3_HW  16
475 #define MIN_AFFINE_VECTORS_V3_HW  (BASE_VECTORS_V3_HW + 1)
476
477 enum {
478         DSM_FUNC_ERR_HANDLE_MSI = 0,
479 };
480
481 static bool hisi_sas_intr_conv;
482 MODULE_PARM_DESC(intr_conv, "interrupt converge enable (0-1)");
483
484 /* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
485 static int prot_mask;
486 module_param(prot_mask, int, 0);
487 MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
488
489 static bool auto_affine_msi_experimental;
490 module_param(auto_affine_msi_experimental, bool, 0444);
491 MODULE_PARM_DESC(auto_affine_msi_experimental, "Enable auto-affinity of MSI IRQs as experimental:\n"
492                  "default is off");
493
494 static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
495 {
496         void __iomem *regs = hisi_hba->regs + off;
497
498         return readl(regs);
499 }
500
501 static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off)
502 {
503         void __iomem *regs = hisi_hba->regs + off;
504
505         return readl_relaxed(regs);
506 }
507
508 static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val)
509 {
510         void __iomem *regs = hisi_hba->regs + off;
511
512         writel(val, regs);
513 }
514
515 static void hisi_sas_phy_write32(struct hisi_hba *hisi_hba, int phy_no,
516                                  u32 off, u32 val)
517 {
518         void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
519
520         writel(val, regs);
521 }
522
523 static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
524                                       int phy_no, u32 off)
525 {
526         void __iomem *regs = hisi_hba->regs + (0x400 * phy_no) + off;
527
528         return readl(regs);
529 }
530
531 #define hisi_sas_read32_poll_timeout(off, val, cond, delay_us,          \
532                                      timeout_us)                        \
533 ({                                                                      \
534         void __iomem *regs = hisi_hba->regs + off;                      \
535         readl_poll_timeout(regs, val, cond, delay_us, timeout_us);      \
536 })
537
538 #define hisi_sas_read32_poll_timeout_atomic(off, val, cond, delay_us,   \
539                                             timeout_us)                 \
540 ({                                                                      \
541         void __iomem *regs = hisi_hba->regs + off;                      \
542         readl_poll_timeout_atomic(regs, val, cond, delay_us, timeout_us);\
543 })
544
545 static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
546 {
547         int i;
548
549         /* Global registers init */
550         hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
551                          (u32)((1ULL << hisi_hba->queue_count) - 1));
552         hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
553         hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
554         hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
555         hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
556         hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
557         hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
558         hisi_sas_write32(hisi_hba, CQ_INT_CONVERGE_EN,
559                          hisi_sas_intr_conv);
560         hisi_sas_write32(hisi_hba, OQ_INT_SRC, 0xffff);
561         hisi_sas_write32(hisi_hba, ENT_INT_SRC1, 0xffffffff);
562         hisi_sas_write32(hisi_hba, ENT_INT_SRC2, 0xffffffff);
563         hisi_sas_write32(hisi_hba, ENT_INT_SRC3, 0xffffffff);
564         hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
565         hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
566         hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffc220ff);
567         hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
568         hisi_sas_write32(hisi_hba, CHNL_ENT_INT_MSK, 0x0);
569         hisi_sas_write32(hisi_hba, HGC_COM_INT_MSK, 0x0);
570         hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0x155555);
571         hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
572         hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
573         for (i = 0; i < hisi_hba->queue_count; i++)
574                 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
575
576         hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
577
578         for (i = 0; i < hisi_hba->n_phy; i++) {
579                 struct hisi_sas_phy *phy = &hisi_hba->phy[i];
580                 struct asd_sas_phy *sas_phy = &phy->sas_phy;
581                 u32 prog_phy_link_rate = 0x800;
582
583                 if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
584                                 SAS_LINK_RATE_1_5_GBPS)) {
585                         prog_phy_link_rate = 0x855;
586                 } else {
587                         enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
588
589                         prog_phy_link_rate =
590                                 hisi_sas_get_prog_phy_linkrate_mask(max) |
591                                 0x800;
592                 }
593                 hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
594                         prog_phy_link_rate);
595                 hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
596                 hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
597                 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
598                 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
599                 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2, 0xffffffff);
600                 hisi_sas_phy_write32(hisi_hba, i, RXOP_CHECK_CFG_H, 0x1000);
601                 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xf2057fff);
602                 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffbfe);
603                 hisi_sas_phy_write32(hisi_hba, i, PHY_CTRL_RDY_MSK, 0x0);
604                 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x0);
605                 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_DWS_RESET_MSK, 0x0);
606                 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x0);
607                 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
608                 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
609                 hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
610                 hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
611                 hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
612                 hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
613                                      0x30f4240);
614                 /* used for 12G negotiate */
615                 hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
616                 hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
617         }
618
619         for (i = 0; i < hisi_hba->queue_count; i++) {
620                 /* Delivery queue */
621                 hisi_sas_write32(hisi_hba,
622                                  DLVRY_Q_0_BASE_ADDR_HI + (i * 0x14),
623                                  upper_32_bits(hisi_hba->cmd_hdr_dma[i]));
624
625                 hisi_sas_write32(hisi_hba, DLVRY_Q_0_BASE_ADDR_LO + (i * 0x14),
626                                  lower_32_bits(hisi_hba->cmd_hdr_dma[i]));
627
628                 hisi_sas_write32(hisi_hba, DLVRY_Q_0_DEPTH + (i * 0x14),
629                                  HISI_SAS_QUEUE_SLOTS);
630
631                 /* Completion queue */
632                 hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_HI + (i * 0x14),
633                                  upper_32_bits(hisi_hba->complete_hdr_dma[i]));
634
635                 hisi_sas_write32(hisi_hba, COMPL_Q_0_BASE_ADDR_LO + (i * 0x14),
636                                  lower_32_bits(hisi_hba->complete_hdr_dma[i]));
637
638                 hisi_sas_write32(hisi_hba, COMPL_Q_0_DEPTH + (i * 0x14),
639                                  HISI_SAS_QUEUE_SLOTS);
640         }
641
642         /* itct */
643         hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_LO,
644                          lower_32_bits(hisi_hba->itct_dma));
645
646         hisi_sas_write32(hisi_hba, ITCT_BASE_ADDR_HI,
647                          upper_32_bits(hisi_hba->itct_dma));
648
649         /* iost */
650         hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_LO,
651                          lower_32_bits(hisi_hba->iost_dma));
652
653         hisi_sas_write32(hisi_hba, IOST_BASE_ADDR_HI,
654                          upper_32_bits(hisi_hba->iost_dma));
655
656         /* breakpoint */
657         hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_LO,
658                          lower_32_bits(hisi_hba->breakpoint_dma));
659
660         hisi_sas_write32(hisi_hba, IO_BROKEN_MSG_ADDR_HI,
661                          upper_32_bits(hisi_hba->breakpoint_dma));
662
663         /* SATA broken msg */
664         hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_LO,
665                          lower_32_bits(hisi_hba->sata_breakpoint_dma));
666
667         hisi_sas_write32(hisi_hba, IO_SATA_BROKEN_MSG_ADDR_HI,
668                          upper_32_bits(hisi_hba->sata_breakpoint_dma));
669
670         /* SATA initial fis */
671         hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_LO,
672                          lower_32_bits(hisi_hba->initial_fis_dma));
673
674         hisi_sas_write32(hisi_hba, SATA_INITI_D2H_STORE_ADDR_HI,
675                          upper_32_bits(hisi_hba->initial_fis_dma));
676
677         /* RAS registers init */
678         hisi_sas_write32(hisi_hba, SAS_RAS_INTR0_MASK, 0x0);
679         hisi_sas_write32(hisi_hba, SAS_RAS_INTR1_MASK, 0x0);
680         hisi_sas_write32(hisi_hba, SAS_RAS_INTR2_MASK, 0x0);
681         hisi_sas_write32(hisi_hba, CFG_SAS_RAS_INTR_MASK, 0x0);
682
683         /* LED registers init */
684         hisi_sas_write32(hisi_hba, SAS_CFG_DRIVE_VLD, 0x80000ff);
685         hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1, 0x80808080);
686         hisi_sas_write32(hisi_hba, SAS_GPIO_TX_0_1 + 0x4, 0x80808080);
687         /* Configure blink generator rate A to 1Hz and B to 4Hz */
688         hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_1, 0x121700);
689         hisi_sas_write32(hisi_hba, SAS_GPIO_CFG_0, 0x800000);
690 }
691
692 static void config_phy_opt_mode_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
693 {
694         u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
695
696         cfg &= ~PHY_CFG_DC_OPT_MSK;
697         cfg |= 1 << PHY_CFG_DC_OPT_OFF;
698         hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
699 }
700
701 static void config_id_frame_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
702 {
703         struct sas_identify_frame identify_frame;
704         u32 *identify_buffer;
705
706         memset(&identify_frame, 0, sizeof(identify_frame));
707         identify_frame.dev_type = SAS_END_DEVICE;
708         identify_frame.frame_type = 0;
709         identify_frame._un1 = 1;
710         identify_frame.initiator_bits = SAS_PROTOCOL_ALL;
711         identify_frame.target_bits = SAS_PROTOCOL_NONE;
712         memcpy(&identify_frame._un4_11[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
713         memcpy(&identify_frame.sas_addr[0], hisi_hba->sas_addr, SAS_ADDR_SIZE);
714         identify_frame.phy_id = phy_no;
715         identify_buffer = (u32 *)(&identify_frame);
716
717         hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD0,
718                         __swab32(identify_buffer[0]));
719         hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD1,
720                         __swab32(identify_buffer[1]));
721         hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD2,
722                         __swab32(identify_buffer[2]));
723         hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD3,
724                         __swab32(identify_buffer[3]));
725         hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD4,
726                         __swab32(identify_buffer[4]));
727         hisi_sas_phy_write32(hisi_hba, phy_no, TX_ID_DWORD5,
728                         __swab32(identify_buffer[5]));
729 }
730
731 static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
732                              struct hisi_sas_device *sas_dev)
733 {
734         struct domain_device *device = sas_dev->sas_device;
735         struct device *dev = hisi_hba->dev;
736         u64 qw0, device_id = sas_dev->device_id;
737         struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
738         struct domain_device *parent_dev = device->parent;
739         struct asd_sas_port *sas_port = device->port;
740         struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
741         u64 sas_addr;
742
743         memset(itct, 0, sizeof(*itct));
744
745         /* qw0 */
746         qw0 = 0;
747         switch (sas_dev->dev_type) {
748         case SAS_END_DEVICE:
749         case SAS_EDGE_EXPANDER_DEVICE:
750         case SAS_FANOUT_EXPANDER_DEVICE:
751                 qw0 = HISI_SAS_DEV_TYPE_SSP << ITCT_HDR_DEV_TYPE_OFF;
752                 break;
753         case SAS_SATA_DEV:
754         case SAS_SATA_PENDING:
755                 if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
756                         qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
757                 else
758                         qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
759                 break;
760         default:
761                 dev_warn(dev, "setup itct: unsupported dev type (%d)\n",
762                          sas_dev->dev_type);
763         }
764
765         qw0 |= ((1 << ITCT_HDR_VALID_OFF) |
766                 (device->linkrate << ITCT_HDR_MCR_OFF) |
767                 (1 << ITCT_HDR_VLN_OFF) |
768                 (0xfa << ITCT_HDR_SMP_TIMEOUT_OFF) |
769                 (1 << ITCT_HDR_AWT_CONTINUE_OFF) |
770                 (port->id << ITCT_HDR_PORT_ID_OFF));
771         itct->qw0 = cpu_to_le64(qw0);
772
773         /* qw1 */
774         memcpy(&sas_addr, device->sas_addr, SAS_ADDR_SIZE);
775         itct->sas_addr = cpu_to_le64(__swab64(sas_addr));
776
777         /* qw2 */
778         if (!dev_is_sata(device))
779                 itct->qw2 = cpu_to_le64((5000ULL << ITCT_HDR_INLT_OFF) |
780                                         (0x1ULL << ITCT_HDR_RTOLT_OFF));
781 }
782
783 static void clear_itct_v3_hw(struct hisi_hba *hisi_hba,
784                               struct hisi_sas_device *sas_dev)
785 {
786         DECLARE_COMPLETION_ONSTACK(completion);
787         u64 dev_id = sas_dev->device_id;
788         struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
789         u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
790
791         sas_dev->completion = &completion;
792
793         /* clear the itct interrupt state */
794         if (ENT_INT_SRC3_ITC_INT_MSK & reg_val)
795                 hisi_sas_write32(hisi_hba, ENT_INT_SRC3,
796                                  ENT_INT_SRC3_ITC_INT_MSK);
797
798         /* clear the itct table */
799         reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
800         hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
801
802         wait_for_completion(sas_dev->completion);
803         memset(itct, 0, sizeof(struct hisi_sas_itct));
804 }
805
806 static void dereg_device_v3_hw(struct hisi_hba *hisi_hba,
807                                 struct domain_device *device)
808 {
809         struct hisi_sas_slot *slot, *slot2;
810         struct hisi_sas_device *sas_dev = device->lldd_dev;
811         u32 cfg_abt_set_query_iptt;
812
813         cfg_abt_set_query_iptt = hisi_sas_read32(hisi_hba,
814                 CFG_ABT_SET_QUERY_IPTT);
815         list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry) {
816                 cfg_abt_set_query_iptt &= ~CFG_SET_ABORTED_IPTT_MSK;
817                 cfg_abt_set_query_iptt |= (1 << CFG_SET_ABORTED_EN_OFF) |
818                         (slot->idx << CFG_SET_ABORTED_IPTT_OFF);
819                 hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
820                         cfg_abt_set_query_iptt);
821         }
822         cfg_abt_set_query_iptt &= ~(1 << CFG_SET_ABORTED_EN_OFF);
823         hisi_sas_write32(hisi_hba, CFG_ABT_SET_QUERY_IPTT,
824                 cfg_abt_set_query_iptt);
825         hisi_sas_write32(hisi_hba, CFG_ABT_SET_IPTT_DONE,
826                                         1 << CFG_ABT_SET_IPTT_DONE_OFF);
827 }
828
829 static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
830 {
831         struct device *dev = hisi_hba->dev;
832         int ret;
833         u32 val;
834
835         hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
836
837         /* Disable all of the PHYs */
838         hisi_sas_stop_phys(hisi_hba);
839         udelay(50);
840
841         /* Ensure axi bus idle */
842         ret = hisi_sas_read32_poll_timeout(AXI_CFG, val, !val,
843                                            20000, 1000000);
844         if (ret) {
845                 dev_err(dev, "axi bus is not idle, ret = %d!\n", ret);
846                 return -EIO;
847         }
848
849         if (ACPI_HANDLE(dev)) {
850                 acpi_status s;
851
852                 s = acpi_evaluate_object(ACPI_HANDLE(dev), "_RST", NULL, NULL);
853                 if (ACPI_FAILURE(s)) {
854                         dev_err(dev, "Reset failed\n");
855                         return -EIO;
856                 }
857         } else {
858                 dev_err(dev, "no reset method!\n");
859                 return -EINVAL;
860         }
861
862         return 0;
863 }
864
865 static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
866 {
867         struct device *dev = hisi_hba->dev;
868         union acpi_object *obj;
869         guid_t guid;
870         int rc;
871
872         rc = reset_hw_v3_hw(hisi_hba);
873         if (rc) {
874                 dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
875                 return rc;
876         }
877
878         msleep(100);
879         init_reg_v3_hw(hisi_hba);
880
881         if (guid_parse("D5918B4B-37AE-4E10-A99F-E5E8A6EF4C1F", &guid)) {
882                 dev_err(dev, "Parse GUID failed\n");
883                 return -EINVAL;
884         }
885
886         /* Switch over to MSI handling , from PCI AER default */
887         obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0,
888                                 DSM_FUNC_ERR_HANDLE_MSI, NULL);
889         if (!obj)
890                 dev_warn(dev, "Switch over to MSI handling failed\n");
891         else
892                 ACPI_FREE(obj);
893
894         return 0;
895 }
896
897 static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
898 {
899         u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
900
901         cfg |= PHY_CFG_ENA_MSK;
902         cfg &= ~PHY_CFG_PHY_RST_MSK;
903         hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
904 }
905
906 static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
907 {
908         u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
909         u32 state;
910
911         cfg &= ~PHY_CFG_ENA_MSK;
912         hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
913
914         mdelay(50);
915
916         state = hisi_sas_read32(hisi_hba, PHY_STATE);
917         if (state & BIT(phy_no)) {
918                 cfg |= PHY_CFG_PHY_RST_MSK;
919                 hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
920         }
921 }
922
923 static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
924 {
925         config_id_frame_v3_hw(hisi_hba, phy_no);
926         config_phy_opt_mode_v3_hw(hisi_hba, phy_no);
927         enable_phy_v3_hw(hisi_hba, phy_no);
928 }
929
930 static void phy_hard_reset_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
931 {
932         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
933         u32 txid_auto;
934
935         hisi_sas_phy_enable(hisi_hba, phy_no, 0);
936         if (phy->identify.device_type == SAS_END_DEVICE) {
937                 txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
938                 hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
939                                         txid_auto | TX_HARDRST_MSK);
940         }
941         msleep(100);
942         hisi_sas_phy_enable(hisi_hba, phy_no, 1);
943 }
944
945 static enum sas_linkrate phy_get_max_linkrate_v3_hw(void)
946 {
947         return SAS_LINK_RATE_12_0_GBPS;
948 }
949
950 static void phys_init_v3_hw(struct hisi_hba *hisi_hba)
951 {
952         int i;
953
954         for (i = 0; i < hisi_hba->n_phy; i++) {
955                 struct hisi_sas_phy *phy = &hisi_hba->phy[i];
956                 struct asd_sas_phy *sas_phy = &phy->sas_phy;
957
958                 if (!sas_phy->phy->enabled)
959                         continue;
960
961                 hisi_sas_phy_enable(hisi_hba, i, 1);
962         }
963 }
964
965 static void sl_notify_ssp_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
966 {
967         u32 sl_control;
968
969         sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
970         sl_control |= SL_CONTROL_NOTIFY_EN_MSK;
971         hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
972         msleep(1);
973         sl_control = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
974         sl_control &= ~SL_CONTROL_NOTIFY_EN_MSK;
975         hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL, sl_control);
976 }
977
978 static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id)
979 {
980         int i, bitmap = 0;
981         u32 phy_port_num_ma = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
982         u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
983
984         for (i = 0; i < hisi_hba->n_phy; i++)
985                 if (phy_state & BIT(i))
986                         if (((phy_port_num_ma >> (i * 4)) & 0xf) == port_id)
987                                 bitmap |= BIT(i);
988
989         return bitmap;
990 }
991
992 /**
993  * The callpath to this function and upto writing the write
994  * queue pointer should be safe from interruption.
995  */
996 static int
997 get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
998 {
999         struct device *dev = hisi_hba->dev;
1000         int queue = dq->id;
1001         u32 r, w;
1002
1003         w = dq->wr_point;
1004         r = hisi_sas_read32_relaxed(hisi_hba,
1005                                 DLVRY_Q_0_RD_PTR + (queue * 0x14));
1006         if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) {
1007                 dev_warn(dev, "full queue=%d r=%d w=%d\n",
1008                          queue, r, w);
1009                 return -EAGAIN;
1010         }
1011
1012         dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
1013
1014         return w;
1015 }
1016
1017 static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
1018 {
1019         struct hisi_hba *hisi_hba = dq->hisi_hba;
1020         struct hisi_sas_slot *s, *s1, *s2 = NULL;
1021         int dlvry_queue = dq->id;
1022         int wp;
1023
1024         list_for_each_entry_safe(s, s1, &dq->list, delivery) {
1025                 if (!s->ready)
1026                         break;
1027                 s2 = s;
1028                 list_del(&s->delivery);
1029         }
1030
1031         if (!s2)
1032                 return;
1033
1034         /*
1035          * Ensure that memories for slots built on other CPUs is observed.
1036          */
1037         smp_rmb();
1038         wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
1039
1040         hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
1041 }
1042
1043 static void prep_prd_sge_v3_hw(struct hisi_hba *hisi_hba,
1044                               struct hisi_sas_slot *slot,
1045                               struct hisi_sas_cmd_hdr *hdr,
1046                               struct scatterlist *scatter,
1047                               int n_elem)
1048 {
1049         struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
1050         struct scatterlist *sg;
1051         int i;
1052
1053         for_each_sg(scatter, sg, n_elem, i) {
1054                 struct hisi_sas_sge *entry = &sge_page->sge[i];
1055
1056                 entry->addr = cpu_to_le64(sg_dma_address(sg));
1057                 entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
1058                 entry->data_len = cpu_to_le32(sg_dma_len(sg));
1059                 entry->data_off = 0;
1060         }
1061
1062         hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));
1063
1064         hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);
1065 }
1066
1067 static void prep_prd_sge_dif_v3_hw(struct hisi_hba *hisi_hba,
1068                                    struct hisi_sas_slot *slot,
1069                                    struct hisi_sas_cmd_hdr *hdr,
1070                                    struct scatterlist *scatter,
1071                                    int n_elem)
1072 {
1073         struct hisi_sas_sge_dif_page *sge_dif_page;
1074         struct scatterlist *sg;
1075         int i;
1076
1077         sge_dif_page = hisi_sas_sge_dif_addr_mem(slot);
1078
1079         for_each_sg(scatter, sg, n_elem, i) {
1080                 struct hisi_sas_sge *entry = &sge_dif_page->sge[i];
1081
1082                 entry->addr = cpu_to_le64(sg_dma_address(sg));
1083                 entry->page_ctrl_0 = 0;
1084                 entry->page_ctrl_1 = 0;
1085                 entry->data_len = cpu_to_le32(sg_dma_len(sg));
1086                 entry->data_off = 0;
1087         }
1088
1089         hdr->dif_prd_table_addr =
1090                 cpu_to_le64(hisi_sas_sge_dif_addr_dma(slot));
1091
1092         hdr->sg_len |= cpu_to_le32(n_elem << CMD_HDR_DIF_SGL_LEN_OFF);
1093 }
1094
1095 static u32 get_prot_chk_msk_v3_hw(struct scsi_cmnd *scsi_cmnd)
1096 {
1097         unsigned char prot_flags = scsi_cmnd->prot_flags;
1098
1099         if (prot_flags & SCSI_PROT_REF_CHECK)
1100                 return T10_CHK_APP_TAG_MSK;
1101         return T10_CHK_REF_TAG_MSK | T10_CHK_APP_TAG_MSK;
1102 }
1103
1104 static void fill_prot_v3_hw(struct scsi_cmnd *scsi_cmnd,
1105                             struct hisi_sas_protect_iu_v3_hw *prot)
1106 {
1107         unsigned char prot_op = scsi_get_prot_op(scsi_cmnd);
1108         unsigned int interval = scsi_prot_interval(scsi_cmnd);
1109         u32 lbrt_chk_val = t10_pi_ref_tag(scsi_cmnd->request);
1110
1111         switch (prot_op) {
1112         case SCSI_PROT_READ_INSERT:
1113                 prot->dw0 |= T10_INSRT_EN_MSK;
1114                 prot->lbrtgv = lbrt_chk_val;
1115                 break;
1116         case SCSI_PROT_READ_STRIP:
1117                 prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
1118                 prot->lbrtcv = lbrt_chk_val;
1119                 prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
1120                 break;
1121         case SCSI_PROT_READ_PASS:
1122                 prot->dw0 |= T10_CHK_EN_MSK;
1123                 prot->lbrtcv = lbrt_chk_val;
1124                 prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
1125                 break;
1126         case SCSI_PROT_WRITE_INSERT:
1127                 prot->dw0 |= T10_INSRT_EN_MSK;
1128                 prot->lbrtgv = lbrt_chk_val;
1129                 break;
1130         case SCSI_PROT_WRITE_STRIP:
1131                 prot->dw0 |= (T10_RMV_EN_MSK | T10_CHK_EN_MSK);
1132                 prot->lbrtcv = lbrt_chk_val;
1133                 break;
1134         case SCSI_PROT_WRITE_PASS:
1135                 prot->dw0 |= T10_CHK_EN_MSK;
1136                 prot->lbrtcv = lbrt_chk_val;
1137                 prot->dw4 |= get_prot_chk_msk_v3_hw(scsi_cmnd);
1138                 break;
1139         default:
1140                 WARN(1, "prot_op(0x%x) is not valid\n", prot_op);
1141                 break;
1142         }
1143
1144         switch (interval) {
1145         case 512:
1146                 break;
1147         case 4096:
1148                 prot->dw0 |= (0x1 << USR_DATA_BLOCK_SZ_OFF);
1149                 break;
1150         case 520:
1151                 prot->dw0 |= (0x2 << USR_DATA_BLOCK_SZ_OFF);
1152                 break;
1153         default:
1154                 WARN(1, "protection interval (0x%x) invalid\n",
1155                      interval);
1156                 break;
1157         }
1158
1159         prot->dw0 |= INCR_LBRT_MSK;
1160 }
1161
1162 static void prep_ssp_v3_hw(struct hisi_hba *hisi_hba,
1163                           struct hisi_sas_slot *slot)
1164 {
1165         struct sas_task *task = slot->task;
1166         struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1167         struct domain_device *device = task->dev;
1168         struct hisi_sas_device *sas_dev = device->lldd_dev;
1169         struct hisi_sas_port *port = slot->port;
1170         struct sas_ssp_task *ssp_task = &task->ssp_task;
1171         struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
1172         struct hisi_sas_tmf_task *tmf = slot->tmf;
1173         int has_data = 0, priority = !!tmf;
1174         unsigned char prot_op;
1175         u8 *buf_cmd;
1176         u32 dw1 = 0, dw2 = 0, len = 0;
1177
1178         hdr->dw0 = cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF) |
1179                                (2 << CMD_HDR_TLR_CTRL_OFF) |
1180                                (port->id << CMD_HDR_PORT_OFF) |
1181                                (priority << CMD_HDR_PRIORITY_OFF) |
1182                                (1 << CMD_HDR_CMD_OFF)); /* ssp */
1183
1184         dw1 = 1 << CMD_HDR_VDTL_OFF;
1185         if (tmf) {
1186                 dw1 |= 2 << CMD_HDR_FRAME_TYPE_OFF;
1187                 dw1 |= DIR_NO_DATA << CMD_HDR_DIR_OFF;
1188         } else {
1189                 prot_op = scsi_get_prot_op(scsi_cmnd);
1190                 dw1 |= 1 << CMD_HDR_FRAME_TYPE_OFF;
1191                 switch (scsi_cmnd->sc_data_direction) {
1192                 case DMA_TO_DEVICE:
1193                         has_data = 1;
1194                         dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
1195                         break;
1196                 case DMA_FROM_DEVICE:
1197                         has_data = 1;
1198                         dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
1199                         break;
1200                 default:
1201                         dw1 &= ~CMD_HDR_DIR_MSK;
1202                 }
1203         }
1204
1205         /* map itct entry */
1206         dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
1207
1208         dw2 = (((sizeof(struct ssp_command_iu) + sizeof(struct ssp_frame_hdr)
1209               + 3) / 4) << CMD_HDR_CFL_OFF) |
1210               ((HISI_SAS_MAX_SSP_RESP_SZ / 4) << CMD_HDR_MRFL_OFF) |
1211               (2 << CMD_HDR_SG_MOD_OFF);
1212         hdr->dw2 = cpu_to_le32(dw2);
1213         hdr->transfer_tags = cpu_to_le32(slot->idx);
1214
1215         if (has_data) {
1216                 prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
1217                                    slot->n_elem);
1218
1219                 if (scsi_prot_sg_count(scsi_cmnd))
1220                         prep_prd_sge_dif_v3_hw(hisi_hba, slot, hdr,
1221                                                scsi_prot_sglist(scsi_cmnd),
1222                                                slot->n_elem_dif);
1223         }
1224
1225         hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
1226         hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
1227
1228         buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
1229                 sizeof(struct ssp_frame_hdr);
1230
1231         memcpy(buf_cmd, &task->ssp_task.LUN, 8);
1232         if (!tmf) {
1233                 buf_cmd[9] = ssp_task->task_attr | (ssp_task->task_prio << 3);
1234                 memcpy(buf_cmd + 12, scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
1235         } else {
1236                 buf_cmd[10] = tmf->tmf;
1237                 switch (tmf->tmf) {
1238                 case TMF_ABORT_TASK:
1239                 case TMF_QUERY_TASK:
1240                         buf_cmd[12] =
1241                                 (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
1242                         buf_cmd[13] =
1243                                 tmf->tag_of_task_to_be_managed & 0xff;
1244                         break;
1245                 default:
1246                         break;
1247                 }
1248         }
1249
1250         if (has_data && (prot_op != SCSI_PROT_NORMAL)) {
1251                 struct hisi_sas_protect_iu_v3_hw prot;
1252                 u8 *buf_cmd_prot;
1253
1254                 hdr->dw7 |= cpu_to_le32(1 << CMD_HDR_ADDR_MODE_SEL_OFF);
1255                 dw1 |= CMD_HDR_PIR_MSK;
1256                 buf_cmd_prot = hisi_sas_cmd_hdr_addr_mem(slot) +
1257                                sizeof(struct ssp_frame_hdr) +
1258                                sizeof(struct ssp_command_iu);
1259
1260                 memset(&prot, 0, sizeof(struct hisi_sas_protect_iu_v3_hw));
1261                 fill_prot_v3_hw(scsi_cmnd, &prot);
1262                 memcpy(buf_cmd_prot, &prot,
1263                        sizeof(struct hisi_sas_protect_iu_v3_hw));
1264                 /*
1265                  * For READ we need the length of the info read into
1266                  * memory, while for WRITE the length written to the disk.
1267                  */
1268                 if (prot_op == SCSI_PROT_WRITE_INSERT ||
1269                     prot_op == SCSI_PROT_READ_INSERT ||
1270                     prot_op == SCSI_PROT_WRITE_PASS ||
1271                     prot_op == SCSI_PROT_READ_PASS) {
1272                         unsigned int interval = scsi_prot_interval(scsi_cmnd);
1273                         unsigned int ilog2_interval = ilog2(interval);
1274
1275                         len = (task->total_xfer_len >> ilog2_interval) * 8;
1276                 }
1277         }
1278
1279         hdr->dw1 = cpu_to_le32(dw1);
1280
1281         hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len + len);
1282 }
1283
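/*
 * Build the command header for an SMP frame: the request is already
 * DMA-mapped, so point cmd_table_addr at it and encode the port, device
 * and frame-length fields in dw0-dw2.
 */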
1284 static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
1285                           struct hisi_sas_slot *slot)
1286 {
1287         struct sas_task *task = slot->task;
1288         struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1289         struct domain_device *device = task->dev;
1290         struct hisi_sas_port *port = slot->port;
1291         struct scatterlist *sg_req;
1292         struct hisi_sas_device *sas_dev = device->lldd_dev;
1293         dma_addr_t req_dma_addr;
1294         unsigned int req_len;
1295
1296         /* req */
1297         sg_req = &task->smp_task.smp_req;
1298         req_len = sg_dma_len(sg_req);
1299         req_dma_addr = sg_dma_address(sg_req);
1300
1301         /* create header */
1302         /* dw0 */
1303         hdr->dw0 = cpu_to_le32((port->id << CMD_HDR_PORT_OFF) |
1304                                (1 << CMD_HDR_PRIORITY_OFF) | /* high pri */
1305                                (2 << CMD_HDR_CMD_OFF)); /* smp */
1306
1307         /* map itct entry */
1308         hdr->dw1 = cpu_to_le32((sas_dev->device_id << CMD_HDR_DEV_ID_OFF) |
1309                                (1 << CMD_HDR_FRAME_TYPE_OFF) |
1310                                (DIR_NO_DATA << CMD_HDR_DIR_OFF));
1311
1312         /* dw2 */
1313         hdr->dw2 = cpu_to_le32((((req_len - 4) / 4) << CMD_HDR_CFL_OFF) |
1314                                (HISI_SAS_MAX_SMP_RESP_SZ / 4 <<
1315                                CMD_HDR_MRFL_OFF));
1316
1317         hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);
1318
1319         hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
1320         hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
1321
1322 }
1323
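/*
 * Build the command header for a SATA/STP command: pick the STP command
 * type (behind an expander or direct-attached), encode the data direction,
 * ATA protocol and optional NCQ tag, then copy the host-to-device FIS into
 * the command table.
 */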
1324 static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
1325                           struct hisi_sas_slot *slot)
1326 {
1327         struct sas_task *task = slot->task;
1328         struct domain_device *device = task->dev;
1329         struct domain_device *parent_dev = device->parent;
1330         struct hisi_sas_device *sas_dev = device->lldd_dev;
1331         struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1332         struct asd_sas_port *sas_port = device->port;
1333         struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
1334         u8 *buf_cmd;
1335         int has_data = 0, hdr_tag = 0;
1336         u32 dw1 = 0, dw2 = 0;
1337
1338         hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
1339         if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type))
1340                 hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
1341         else
1342                 hdr->dw0 |= cpu_to_le32(4 << CMD_HDR_CMD_OFF);
1343
1344         switch (task->data_dir) {
1345         case DMA_TO_DEVICE:
1346                 has_data = 1;
1347                 dw1 |= DIR_TO_DEVICE << CMD_HDR_DIR_OFF;
1348                 break;
1349         case DMA_FROM_DEVICE:
1350                 has_data = 1;
1351                 dw1 |= DIR_TO_INI << CMD_HDR_DIR_OFF;
1352                 break;
1353         default:
1354                 dw1 &= ~CMD_HDR_DIR_MSK;
1355         }
1356
1357         if ((task->ata_task.fis.command == ATA_CMD_DEV_RESET) &&
1358                         (task->ata_task.fis.control & ATA_SRST))
1359                 dw1 |= 1 << CMD_HDR_RESET_OFF;
1360
1361         dw1 |= (hisi_sas_get_ata_protocol(
1362                 &task->ata_task.fis, task->data_dir))
1363                 << CMD_HDR_FRAME_TYPE_OFF;
1364         dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
1365
1366         if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis))
1367                 dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF;
1368
1369         hdr->dw1 = cpu_to_le32(dw1);
1370
1371         /* dw2 */
1372         if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
1373                 task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
1374                 dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
1375         }
1376
1377         dw2 |= (HISI_SAS_MAX_STP_RESP_SZ / 4) << CMD_HDR_CFL_OFF |
1378                         2 << CMD_HDR_SG_MOD_OFF;
1379         hdr->dw2 = cpu_to_le32(dw2);
1380
1381         /* dw3 */
1382         hdr->transfer_tags = cpu_to_le32(slot->idx);
1383
1384         if (has_data)
1385                 prep_prd_sge_v3_hw(hisi_hba, slot, hdr, task->scatter,
1386                                         slot->n_elem);
1387
1388         hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
1389         hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
1390         hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
1391
1392         buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);
1393
1394         if (likely(!task->ata_task.device_control_reg_update))
1395                 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
1396         /* fill in command FIS */
1397         memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
1398 }
1399
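/*
 * Build the command header for an internal abort: dw0 carries the abort
 * command type, device type and abort flag, dw1 the device id and dw7 the
 * IPTT of the command to be aborted.
 */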
1400 static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
1401                 struct hisi_sas_slot *slot,
1402                 int device_id, int abort_flag, int tag_to_abort)
1403 {
1404         struct sas_task *task = slot->task;
1405         struct domain_device *dev = task->dev;
1406         struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
1407         struct hisi_sas_port *port = slot->port;
1408
1409         /* dw0 */
1410         hdr->dw0 = cpu_to_le32((5 << CMD_HDR_CMD_OFF) | /*abort*/
1411                                (port->id << CMD_HDR_PORT_OFF) |
1412                                    (dev_is_sata(dev)
1413                                         << CMD_HDR_ABORT_DEVICE_TYPE_OFF) |
1414                                         (abort_flag
1415                                          << CMD_HDR_ABORT_FLAG_OFF));
1416
1417         /* dw1 */
1418         hdr->dw1 = cpu_to_le32(device_id
1419                         << CMD_HDR_DEV_ID_OFF);
1420
1421         /* dw7 */
1422         hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
1423         hdr->transfer_tags = cpu_to_le32(slot->idx);
1424
1425 }
1426
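/*
 * Phy-up handling: read the port id and negotiated link rate, then parse
 * either the initial D2H FIS (SATA) or the received IDENTIFY address frame
 * (SAS) to fill in the attached device info before notifying libsas.
 */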
1427 static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1428 {
1429         int i;
1430         irqreturn_t res;
1431         u32 context, port_id, link_rate;
1432         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1433         struct asd_sas_phy *sas_phy = &phy->sas_phy;
1434         struct device *dev = hisi_hba->dev;
1435         unsigned long flags;
1436
1437         del_timer(&phy->timer);
1438         hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
1439
1440         port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
1441         port_id = (port_id >> (4 * phy_no)) & 0xf;
1442         link_rate = hisi_sas_read32(hisi_hba, PHY_CONN_RATE);
1443         link_rate = (link_rate >> (phy_no * 4)) & 0xf;
1444
1445         if (port_id == 0xf) {
1446                 dev_err(dev, "phyup: phy%d invalid portid\n", phy_no);
1447                 res = IRQ_NONE;
1448                 goto end;
1449         }
1450         sas_phy->linkrate = link_rate;
1451         phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
1452
1453         /* Check for SATA dev */
1454         context = hisi_sas_read32(hisi_hba, PHY_CONTEXT);
1455         if (context & (1 << phy_no)) {
1456                 struct hisi_sas_initial_fis *initial_fis;
1457                 struct dev_to_host_fis *fis;
1458                 u8 attached_sas_addr[SAS_ADDR_SIZE] = {0};
1459                 struct Scsi_Host *shost = hisi_hba->shost;
1460
1461                 dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
1462                 initial_fis = &hisi_hba->initial_fis[phy_no];
1463                 fis = &initial_fis->fis;
1464
1465                 /* check ERR bit of Status Register */
1466                 if (fis->status & ATA_ERR) {
1467                         dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n",
1468                                  phy_no, fis->status);
1469                         hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
1470                         res = IRQ_NONE;
1471                         goto end;
1472                 }
1473
1474                 sas_phy->oob_mode = SATA_OOB_MODE;
1475                 attached_sas_addr[0] = 0x50;
1476                 attached_sas_addr[6] = shost->host_no;
1477                 attached_sas_addr[7] = phy_no;
1478                 memcpy(sas_phy->attached_sas_addr,
1479                        attached_sas_addr,
1480                        SAS_ADDR_SIZE);
1481                 memcpy(sas_phy->frame_rcvd, fis,
1482                        sizeof(struct dev_to_host_fis));
1483                 phy->phy_type |= PORT_TYPE_SATA;
1484                 phy->identify.device_type = SAS_SATA_DEV;
1485                 phy->frame_rcvd_size = sizeof(struct dev_to_host_fis);
1486                 phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
1487         } else {
1488                 u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
1489                 struct sas_identify_frame *id =
1490                         (struct sas_identify_frame *)frame_rcvd;
1491
1492                 dev_info(dev, "phyup: phy%d link_rate=%d\n", phy_no, link_rate);
1493                 for (i = 0; i < 6; i++) {
1494                         u32 idaf = hisi_sas_phy_read32(hisi_hba, phy_no,
1495                                                RX_IDAF_DWORD0 + (i * 4));
1496                         frame_rcvd[i] = __swab32(idaf);
1497                 }
1498                 sas_phy->oob_mode = SAS_OOB_MODE;
1499                 memcpy(sas_phy->attached_sas_addr,
1500                        &id->sas_addr,
1501                        SAS_ADDR_SIZE);
1502                 phy->phy_type |= PORT_TYPE_SAS;
1503                 phy->identify.device_type = id->dev_type;
1504                 phy->frame_rcvd_size = sizeof(struct sas_identify_frame);
1505                 if (phy->identify.device_type == SAS_END_DEVICE)
1506                         phy->identify.target_port_protocols =
1507                                 SAS_PROTOCOL_SSP;
1508                 else if (phy->identify.device_type != SAS_PHY_UNUSED)
1509                         phy->identify.target_port_protocols =
1510                                 SAS_PROTOCOL_SMP;
1511         }
1512
1513         phy->port_id = port_id;
1514         phy->phy_attached = 1;
1515         hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
1516         res = IRQ_HANDLED;
1517         spin_lock_irqsave(&phy->lock, flags);
1518         if (phy->reset_completion) {
1519                 phy->in_reset = 0;
1520                 complete(phy->reset_completion);
1521         }
1522         spin_unlock_irqrestore(&phy->lock, flags);
1523 end:
1524         hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
1525                              CHL_INT0_SL_PHY_ENABLE_MSK);
1526         hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 0);
1527
1528         return res;
1529 }
1530
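/*
 * Phy-down handling: report the current ready state to the libsas layer,
 * clear SL_CTA in SL_CONTROL, set CT3 in TXID_AUTO and acknowledge the
 * not-ready channel interrupt.
 */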
1531 static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1532 {
1533         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1534         u32 phy_state, sl_ctrl, txid_auto;
1535         struct device *dev = hisi_hba->dev;
1536
1537         del_timer(&phy->timer);
1538         hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1);
1539
1540         phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
1541         dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
1542         hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
1543
1544         sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
1545         hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
1546                                                 sl_ctrl & ~SL_CTA_MSK);
1547
1548         txid_auto = hisi_sas_phy_read32(hisi_hba, phy_no, TXID_AUTO);
1549         hisi_sas_phy_write32(hisi_hba, phy_no, TXID_AUTO,
1550                                                 txid_auto | CT3_MSK);
1551
1552         hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0, CHL_INT0_NOT_RDY_MSK);
1553         hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 0);
1554
1555         return IRQ_HANDLED;
1556 }
1557
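/*
 * Broadcast primitive received: forward a PORTE_BROADCAST_RCVD event to
 * libsas unless a controller reset is in progress.
 */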
1558 static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1559 {
1560         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1561         struct asd_sas_phy *sas_phy = &phy->sas_phy;
1562         struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1563         u32 bcast_status;
1564
1565         hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
1566         bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
1567         if ((bcast_status & RX_BCAST_CHG_MSK) &&
1568             !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
1569                 sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
1570         hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
1571                              CHL_INT0_SL_RX_BCST_ACK_MSK);
1572         hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
1573
1574         return IRQ_HANDLED;
1575 }
1576
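/*
 * Top-level handler for the phy up/down/broadcast interrupt: CHNL_INT_STATUS
 * holds one status nibble per phy, so walk the phys and dispatch to the
 * phy-up, broadcast or phy-down handlers according to CHL_INT0.
 */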
1577 static irqreturn_t int_phy_up_down_bcast_v3_hw(int irq_no, void *p)
1578 {
1579         struct hisi_hba *hisi_hba = p;
1580         u32 irq_msk;
1581         int phy_no = 0;
1582         irqreturn_t res = IRQ_NONE;
1583
1584         irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
1585                                 & 0x11111111;
1586         while (irq_msk) {
1587                 if (irq_msk & 1) {
1588                         u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no,
1589                                                             CHL_INT0);
1590                         u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
1591                         int rdy = phy_state & (1 << phy_no);
1592
1593                         if (rdy) {
1594                                 if (irq_value & CHL_INT0_SL_PHY_ENABLE_MSK)
1595                                         /* phy up */
1596                                         if (phy_up_v3_hw(phy_no, hisi_hba)
1597                                                         == IRQ_HANDLED)
1598                                                 res = IRQ_HANDLED;
1599                                 if (irq_value & CHL_INT0_SL_RX_BCST_ACK_MSK)
1600                                         /* phy bcast */
1601                                         if (phy_bcast_v3_hw(phy_no, hisi_hba)
1602                                                         == IRQ_HANDLED)
1603                                                 res = IRQ_HANDLED;
1604                         } else {
1605                                 if (irq_value & CHL_INT0_NOT_RDY_MSK)
1606                                         /* phy down */
1607                                         if (phy_down_v3_hw(phy_no, hisi_hba)
1608                                                         == IRQ_HANDLED)
1609                                                 res = IRQ_HANDLED;
1610                         }
1611                 }
1612                 irq_msk >>= 4;
1613                 phy_no++;
1614         }
1615
1616         return res;
1617 }
1618
1619 static const struct hisi_sas_hw_error port_axi_error[] = {
1620         {
1621                 .irq_msk = BIT(CHL_INT1_DMAC_TX_ECC_MB_ERR_OFF),
1622                 .msg = "dmac_tx_ecc_bad_err",
1623         },
1624         {
1625                 .irq_msk = BIT(CHL_INT1_DMAC_RX_ECC_MB_ERR_OFF),
1626                 .msg = "dmac_rx_ecc_bad_err",
1627         },
1628         {
1629                 .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_WR_ERR_OFF),
1630                 .msg = "dma_tx_axi_wr_err",
1631         },
1632         {
1633                 .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RD_ERR_OFF),
1634                 .msg = "dma_tx_axi_rd_err",
1635         },
1636         {
1637                 .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_WR_ERR_OFF),
1638                 .msg = "dma_rx_axi_wr_err",
1639         },
1640         {
1641                 .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF),
1642                 .msg = "dma_rx_axi_rd_err",
1643         },
1644         {
1645                 .irq_msk = BIT(CHL_INT1_DMAC_TX_FIFO_ERR_OFF),
1646                 .msg = "dma_tx_fifo_err",
1647         },
1648         {
1649                 .irq_msk = BIT(CHL_INT1_DMAC_RX_FIFO_ERR_OFF),
1650                 .msg = "dma_rx_fifo_err",
1651         },
1652         {
1653                 .irq_msk = BIT(CHL_INT1_DMAC_TX_AXI_RUSER_ERR_OFF),
1654                 .msg = "dma_tx_axi_ruser_err",
1655         },
1656         {
1657                 .irq_msk = BIT(CHL_INT1_DMAC_RX_AXI_RUSER_ERR_OFF),
1658                 .msg = "dma_rx_axi_ruser_err",
1659         },
1660 };
1661
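/*
 * CHL_INT1 reports per-phy DMA/AXI and FIFO errors; any unmasked error
 * schedules a controller reset via rst_work.
 */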
1662 static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1663 {
1664         u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1);
1665         u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK);
1666         struct device *dev = hisi_hba->dev;
1667         int i;
1668
1669         irq_value &= ~irq_msk;
1670         if (!irq_value)
1671                 return;
1672
1673         for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
1674                 const struct hisi_sas_hw_error *error = &port_axi_error[i];
1675
1676                 if (!(irq_value & error->irq_msk))
1677                         continue;
1678
1679                 dev_err(dev, "%s error (phy%d 0x%x) found!\n",
1680                         error->msg, phy_no, irq_value);
1681                 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1682         }
1683
1684         hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
1685 }
1686
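/*
 * Accumulate the per-phy hardware error counters (dword sync loss, reset
 * problems, invalid dwords, disparity and code violation errors) into the
 * sas_phy statistics and the driver's own counter.
 */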
1687 static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1688 {
1689         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1690         struct asd_sas_phy *sas_phy = &phy->sas_phy;
1691         struct sas_phy *sphy = sas_phy->phy;
1692         unsigned long flags;
1693         u32 reg_value;
1694
1695         spin_lock_irqsave(&phy->lock, flags);
1696
1697         /* loss of dword sync */
1698         reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
1699         sphy->loss_of_dword_sync_count += reg_value;
1700
1701         /* phy reset problem */
1702         reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
1703         sphy->phy_reset_problem_count += reg_value;
1704
1705         /* invalid dword */
1706         reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
1707         sphy->invalid_dword_count += reg_value;
1708
1709         /* disparity err */
1710         reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
1711         sphy->running_disparity_error_count += reg_value;
1712
1713         /* code violation error */
1714         reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
1715         phy->code_violation_err_count += reg_value;
1716
1717         spin_unlock_irqrestore(&phy->lock, flags);
1718 }
1719
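/*
 * CHL_INT2 covers identify/STP link timeouts and receive-path errors: newer
 * silicon revisions dump the error counters, while revision 0x20 polls
 * HILINK_ERR_DFX and resets the link if the error persists.
 */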
1720 static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1721 {
1722         u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
1723         u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
1724         struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1725         struct pci_dev *pci_dev = hisi_hba->pci_dev;
1726         struct device *dev = hisi_hba->dev;
1727         static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
1728                         BIT(CHL_INT2_RX_CODE_ERR_OFF) |
1729                         BIT(CHL_INT2_RX_INVLD_DW_OFF);
1730
1731         irq_value &= ~irq_msk;
1732         if (!irq_value)
1733                 return;
1734
1735         if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
1736                 dev_warn(dev, "phy%d identify timeout\n", phy_no);
1737                 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
1738         }
1739
1740         if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
1741                 u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
1742                                 STP_LINK_TIMEOUT_STATE);
1743
1744                 dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
1745                          phy_no, reg_value);
1746                 if (reg_value & BIT(4))
1747                         hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
1748         }
1749
1750         if (pci_dev->revision > 0x20 && (irq_value & msk)) {
1751                 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1752                 struct sas_phy *sphy = sas_phy->phy;
1753
1754                 phy_get_events_v3_hw(hisi_hba, phy_no);
1755
1756                 if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF))
1757                         dev_info(dev, "phy%d invalid dword cnt:   %u\n", phy_no,
1758                                  sphy->invalid_dword_count);
1759
1760                 if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF))
1761                         dev_info(dev, "phy%d code violation cnt:  %u\n", phy_no,
1762                                  phy->code_violation_err_count);
1763
1764                 if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF))
1765                         dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no,
1766                                  sphy->running_disparity_error_count);
1767         }
1768
1769         if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
1770             (pci_dev->revision == 0x20)) {
1771                 u32 reg_value;
1772                 int rc;
1773
1774                 rc = hisi_sas_read32_poll_timeout_atomic(
1775                                 HILINK_ERR_DFX, reg_value,
1776                                 !((reg_value >> 8) & BIT(phy_no)),
1777                                 1000, 10000);
1778                 if (rc)
1779                         hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
1780         }
1781
1782         hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
1783 }
1784
1785 static void handle_chl_int0_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1786 {
1787         u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT0);
1788
1789         if (irq_value0 & CHL_INT0_PHY_RDY_MSK)
1790                 hisi_sas_phy_oob_ready(hisi_hba, phy_no);
1791
1792         hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
1793                              irq_value0 & (~CHL_INT0_SL_RX_BCST_ACK_MSK)
1794                              & (~CHL_INT0_SL_PHY_ENABLE_MSK)
1795                              & (~CHL_INT0_NOT_RDY_MSK));
1796 }
1797
1798 static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
1799 {
1800         struct hisi_hba *hisi_hba = p;
1801         u32 irq_msk;
1802         int phy_no = 0;
1803
1804         irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS)
1805                                 & 0xeeeeeeee;
1806
1807         while (irq_msk) {
1808                 if (irq_msk & (2 << (phy_no * 4)))
1809                         handle_chl_int0_v3_hw(hisi_hba, phy_no);
1810
1811                 if (irq_msk & (4 << (phy_no * 4)))
1812                         handle_chl_int1_v3_hw(hisi_hba, phy_no);
1813
1814                 if (irq_msk & (8 << (phy_no * 4)))
1815                         handle_chl_int2_v3_hw(hisi_hba, phy_no);
1816
1817                 irq_msk &= ~(0xe << (phy_no * 4));
1818                 phy_no++;
1819         }
1820
1821         return IRQ_HANDLED;
1822 }
1823
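/*
 * Multi-bit (uncorrectable) ECC errors: each entry maps an interrupt bit to
 * the register and field holding the failing RAM address.
 */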
1824 static const struct hisi_sas_hw_error multi_bit_ecc_errors[] = {
1825         {
1826                 .irq_msk = BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF),
1827                 .msk = HGC_DQE_ECC_MB_ADDR_MSK,
1828                 .shift = HGC_DQE_ECC_MB_ADDR_OFF,
1829                 .msg = "hgc_dqe_eccbad_intr found: ram addr is 0x%08X\n",
1830                 .reg = HGC_DQE_ECC_ADDR,
1831         },
1832         {
1833                 .irq_msk = BIT(SAS_ECC_INTR_IOST_ECC_MB_OFF),
1834                 .msk = HGC_IOST_ECC_MB_ADDR_MSK,
1835                 .shift = HGC_IOST_ECC_MB_ADDR_OFF,
1836                 .msg = "hgc_iost_eccbad_intr found: ram addr is 0x%08X\n",
1837                 .reg = HGC_IOST_ECC_ADDR,
1838         },
1839         {
1840                 .irq_msk = BIT(SAS_ECC_INTR_ITCT_ECC_MB_OFF),
1841                 .msk = HGC_ITCT_ECC_MB_ADDR_MSK,
1842                 .shift = HGC_ITCT_ECC_MB_ADDR_OFF,
1843                 .msg = "hgc_itct_eccbad_intr found: ram addr is 0x%08X\n",
1844                 .reg = HGC_ITCT_ECC_ADDR,
1845         },
1846         {
1847                 .irq_msk = BIT(SAS_ECC_INTR_IOSTLIST_ECC_MB_OFF),
1848                 .msk = HGC_LM_DFX_STATUS2_IOSTLIST_MSK,
1849                 .shift = HGC_LM_DFX_STATUS2_IOSTLIST_OFF,
1850                 .msg = "hgc_iostl_eccbad_intr found: mem addr is 0x%08X\n",
1851                 .reg = HGC_LM_DFX_STATUS2,
1852         },
1853         {
1854                 .irq_msk = BIT(SAS_ECC_INTR_ITCTLIST_ECC_MB_OFF),
1855                 .msk = HGC_LM_DFX_STATUS2_ITCTLIST_MSK,
1856                 .shift = HGC_LM_DFX_STATUS2_ITCTLIST_OFF,
1857                 .msg = "hgc_itctl_eccbad_intr found: mem addr is 0x%08X\n",
1858                 .reg = HGC_LM_DFX_STATUS2,
1859         },
1860         {
1861                 .irq_msk = BIT(SAS_ECC_INTR_CQE_ECC_MB_OFF),
1862                 .msk = HGC_CQE_ECC_MB_ADDR_MSK,
1863                 .shift = HGC_CQE_ECC_MB_ADDR_OFF,
1864                 .msg = "hgc_cqe_eccbad_intr found: ram address is 0x%08X\n",
1865                 .reg = HGC_CQE_ECC_ADDR,
1866         },
1867         {
1868                 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM0_ECC_MB_OFF),
1869                 .msk = HGC_RXM_DFX_STATUS14_MEM0_MSK,
1870                 .shift = HGC_RXM_DFX_STATUS14_MEM0_OFF,
1871                 .msg = "rxm_mem0_eccbad_intr found: mem addr is 0x%08X\n",
1872                 .reg = HGC_RXM_DFX_STATUS14,
1873         },
1874         {
1875                 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM1_ECC_MB_OFF),
1876                 .msk = HGC_RXM_DFX_STATUS14_MEM1_MSK,
1877                 .shift = HGC_RXM_DFX_STATUS14_MEM1_OFF,
1878                 .msg = "rxm_mem1_eccbad_intr found: mem addr is 0x%08X\n",
1879                 .reg = HGC_RXM_DFX_STATUS14,
1880         },
1881         {
1882                 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM2_ECC_MB_OFF),
1883                 .msk = HGC_RXM_DFX_STATUS14_MEM2_MSK,
1884                 .shift = HGC_RXM_DFX_STATUS14_MEM2_OFF,
1885                 .msg = "rxm_mem2_eccbad_intr found: mem addr is 0x%08X\n",
1886                 .reg = HGC_RXM_DFX_STATUS14,
1887         },
1888         {
1889                 .irq_msk = BIT(SAS_ECC_INTR_NCQ_MEM3_ECC_MB_OFF),
1890                 .msk = HGC_RXM_DFX_STATUS15_MEM3_MSK,
1891                 .shift = HGC_RXM_DFX_STATUS15_MEM3_OFF,
1892                 .msg = "rxm_mem3_eccbad_intr found: mem addr is 0x%08X\n",
1893                 .reg = HGC_RXM_DFX_STATUS15,
1894         },
1895         {
1896                 .irq_msk = BIT(SAS_ECC_INTR_OOO_RAM_ECC_MB_OFF),
1897                 .msk = AM_ROB_ECC_ERR_ADDR_MSK,
1898                 .shift = AM_ROB_ECC_ERR_ADDR_OFF,
1899                 .msg = "ooo_ram_eccbad_intr found: ROB_ECC_ERR_ADDR=0x%08X\n",
1900                 .reg = AM_ROB_ECC_ERR_ADDR,
1901         },
1902 };
1903
1904 static void multi_bit_ecc_error_process_v3_hw(struct hisi_hba *hisi_hba,
1905                                               u32 irq_value)
1906 {
1907         struct device *dev = hisi_hba->dev;
1908         const struct hisi_sas_hw_error *ecc_error;
1909         u32 val;
1910         int i;
1911
1912         for (i = 0; i < ARRAY_SIZE(multi_bit_ecc_errors); i++) {
1913                 ecc_error = &multi_bit_ecc_errors[i];
1914                 if (irq_value & ecc_error->irq_msk) {
1915                         val = hisi_sas_read32(hisi_hba, ecc_error->reg);
1916                         val &= ecc_error->msk;
1917                         val >>= ecc_error->shift;
1918                         dev_err(dev, ecc_error->msg, val);
1919                         queue_work(hisi_hba->wq, &hisi_hba->rst_work);
1920                 }
1921         }
1922 }
1923
1924 static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba)
1925 {
1926         u32 irq_value, irq_msk;
1927
1928         irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK);
1929         hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff);
1930
1931         irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);
1932         if (irq_value)
1933                 multi_bit_ecc_error_process_v3_hw(hisi_hba, irq_value);
1934
1935         hisi_sas_write32(hisi_hba, SAS_ECC_INTR, irq_value);
1936         hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk);
1937 }
1938
1939 static const struct hisi_sas_hw_error axi_error[] = {
1940         { .msk = BIT(0), .msg = "IOST_AXI_W_ERR" },
1941         { .msk = BIT(1), .msg = "IOST_AXI_R_ERR" },
1942         { .msk = BIT(2), .msg = "ITCT_AXI_W_ERR" },
1943         { .msk = BIT(3), .msg = "ITCT_AXI_R_ERR" },
1944         { .msk = BIT(4), .msg = "SATA_AXI_W_ERR" },
1945         { .msk = BIT(5), .msg = "SATA_AXI_R_ERR" },
1946         { .msk = BIT(6), .msg = "DQE_AXI_R_ERR" },
1947         { .msk = BIT(7), .msg = "CQE_AXI_W_ERR" },
1948         {}
1949 };
1950
1951 static const struct hisi_sas_hw_error fifo_error[] = {
1952         { .msk = BIT(8),  .msg = "CQE_WINFO_FIFO" },
1953         { .msk = BIT(9),  .msg = "CQE_MSG_FIFO" },
1954         { .msk = BIT(10), .msg = "GETDQE_FIFO" },
1955         { .msk = BIT(11), .msg = "CMDP_FIFO" },
1956         { .msk = BIT(12), .msg = "AWTCTRL_FIFO" },
1957         {}
1958 };
1959
1960 static const struct hisi_sas_hw_error fatal_axi_error[] = {
1961         {
1962                 .irq_msk = BIT(ENT_INT_SRC3_WP_DEPTH_OFF),
1963                 .msg = "write pointer and depth",
1964         },
1965         {
1966                 .irq_msk = BIT(ENT_INT_SRC3_IPTT_SLOT_NOMATCH_OFF),
1967                 .msg = "iptt no match slot",
1968         },
1969         {
1970                 .irq_msk = BIT(ENT_INT_SRC3_RP_DEPTH_OFF),
1971                 .msg = "read pointer and depth",
1972         },
1973         {
1974                 .irq_msk = BIT(ENT_INT_SRC3_AXI_OFF),
1975                 .reg = HGC_AXI_FIFO_ERR_INFO,
1976                 .sub = axi_error,
1977         },
1978         {
1979                 .irq_msk = BIT(ENT_INT_SRC3_FIFO_OFF),
1980                 .reg = HGC_AXI_FIFO_ERR_INFO,
1981                 .sub = fifo_error,
1982         },
1983         {
1984                 .irq_msk = BIT(ENT_INT_SRC3_LM_OFF),
1985                 .msg = "LM add/fetch list",
1986         },
1987         {
1988                 .irq_msk = BIT(ENT_INT_SRC3_ABT_OFF),
1989                 .msg = "SAS_HGC_ABT fetch LM list",
1990         },
1991         {
1992                 .irq_msk = BIT(ENT_INT_SRC3_DQE_POISON_OFF),
1993                 .msg = "read dqe poison",
1994         },
1995         {
1996                 .irq_msk = BIT(ENT_INT_SRC3_IOST_POISON_OFF),
1997                 .msg = "read iost poison",
1998         },
1999         {
2000                 .irq_msk = BIT(ENT_INT_SRC3_ITCT_POISON_OFF),
2001                 .msg = "read itct poison",
2002         },
2003         {
2004                 .irq_msk = BIT(ENT_INT_SRC3_ITCT_NCQ_POISON_OFF),
2005                 .msg = "read itct ncq poison",
2006         },
2007
2008 };
2009
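/*
 * Fatal AXI/queue-engine interrupt: decode ENT_INT_SRC3 against the
 * fatal_axi_error table (including the AXI and FIFO sub-tables), schedule a
 * controller reset for each error, process ECC interrupts and complete any
 * pending ITCT clear.
 */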
2010 static irqreturn_t fatal_axi_int_v3_hw(int irq_no, void *p)
2011 {
2012         u32 irq_value, irq_msk;
2013         struct hisi_hba *hisi_hba = p;
2014         struct device *dev = hisi_hba->dev;
2015         struct pci_dev *pdev = hisi_hba->pci_dev;
2016         int i;
2017
2018         irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
2019         hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0x1df00);
2020
2021         irq_value = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
2022         irq_value &= ~irq_msk;
2023
2024         for (i = 0; i < ARRAY_SIZE(fatal_axi_error); i++) {
2025                 const struct hisi_sas_hw_error *error = &fatal_axi_error[i];
2026
2027                 if (!(irq_value & error->irq_msk))
2028                         continue;
2029
2030                 if (error->sub) {
2031                         const struct hisi_sas_hw_error *sub = error->sub;
2032                         u32 err_value = hisi_sas_read32(hisi_hba, error->reg);
2033
2034                         for (; sub->msk || sub->msg; sub++) {
2035                                 if (!(err_value & sub->msk))
2036                                         continue;
2037
2038                                 dev_err(dev, "%s error (0x%x) found!\n",
2039                                         sub->msg, irq_value);
2040                                 queue_work(hisi_hba->wq, &hisi_hba->rst_work);
2041                         }
2042                 } else {
2043                         dev_err(dev, "%s error (0x%x) found!\n",
2044                                 error->msg, irq_value);
2045                         queue_work(hisi_hba->wq, &hisi_hba->rst_work);
2046                 }
2047
2048                 if (pdev->revision < 0x21) {
2049                         u32 reg_val;
2050
2051                         reg_val = hisi_sas_read32(hisi_hba,
2052                                                   AXI_MASTER_CFG_BASE +
2053                                                   AM_CTRL_GLOBAL);
2054                         reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
2055                         hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
2056                                          AM_CTRL_GLOBAL, reg_val);
2057                 }
2058         }
2059
2060         fatal_ecc_int_v3_hw(hisi_hba);
2061
2062         if (irq_value & BIT(ENT_INT_SRC3_ITC_INT_OFF)) {
2063                 u32 reg_val = hisi_sas_read32(hisi_hba, ITCT_CLR);
2064                 u32 dev_id = reg_val & ITCT_DEV_MSK;
2065                 struct hisi_sas_device *sas_dev =
2066                                 &hisi_hba->devices[dev_id];
2067
2068                 hisi_sas_write32(hisi_hba, ITCT_CLR, 0);
2069                 dev_dbg(dev, "clear ITCT ok\n");
2070                 complete(sas_dev->completion);
2071         }
2072
2073         hisi_sas_write32(hisi_hba, ENT_INT_SRC3, irq_value & 0x1df00);
2074         hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk);
2075
2076         return IRQ_HANDLED;
2077 }
2078
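/*
 * Translate the error record of a failed slot into libsas task status:
 * data underrun, an IO that reached the target (flagged for abort) or an
 * open reject, depending on the protocol.
 */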
2079 static void
2080 slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
2081                struct hisi_sas_slot *slot)
2082 {
2083         struct task_status_struct *ts = &task->task_status;
2084         struct hisi_sas_complete_v3_hdr *complete_queue =
2085                         hisi_hba->complete_hdr[slot->cmplt_queue];
2086         struct hisi_sas_complete_v3_hdr *complete_hdr =
2087                         &complete_queue[slot->cmplt_queue_slot];
2088         struct hisi_sas_err_record_v3 *record =
2089                         hisi_sas_status_buf_addr_mem(slot);
2090         u32 dma_rx_err_type = le32_to_cpu(record->dma_rx_err_type);
2091         u32 trans_tx_fail_type = le32_to_cpu(record->trans_tx_fail_type);
2092         u32 dw3 = le32_to_cpu(complete_hdr->dw3);
2093
2094         switch (task->task_proto) {
2095         case SAS_PROTOCOL_SSP:
2096                 if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
2097                         ts->residual = trans_tx_fail_type;
2098                         ts->stat = SAS_DATA_UNDERRUN;
2099                 } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
2100                         ts->stat = SAS_QUEUE_FULL;
2101                         slot->abort = 1;
2102                 } else {
2103                         ts->stat = SAS_OPEN_REJECT;
2104                         ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2105                 }
2106                 break;
2107         case SAS_PROTOCOL_SATA:
2108         case SAS_PROTOCOL_STP:
2109         case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
2110                 if (dma_rx_err_type & RX_DATA_LEN_UNDERFLOW_MSK) {
2111                         ts->residual = trans_tx_fail_type;
2112                         ts->stat = SAS_DATA_UNDERRUN;
2113                 } else if (dw3 & CMPLT_HDR_IO_IN_TARGET_MSK) {
2114                         ts->stat = SAS_PHY_DOWN;
2115                         slot->abort = 1;
2116                 } else {
2117                         ts->stat = SAS_OPEN_REJECT;
2118                         ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
2119                 }
2120                 hisi_sas_sata_done(task, slot);
2121                 break;
2122         case SAS_PROTOCOL_SMP:
2123                 ts->stat = SAM_STAT_CHECK_CONDITION;
2124                 break;
2125         default:
2126                 break;
2127         }
2128 }
2129
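/*
 * Process one completion queue entry: map the abort status to TMF response
 * codes, decode erroneous completions via slot_err_v3_hw(), copy back
 * SSP/SMP response data, free the slot and call task_done() unless the HA
 * is frozen.
 */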
2130 static int
2131 slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
2132 {
2133         struct sas_task *task = slot->task;
2134         struct hisi_sas_device *sas_dev;
2135         struct device *dev = hisi_hba->dev;
2136         struct task_status_struct *ts;
2137         struct domain_device *device;
2138         struct sas_ha_struct *ha;
2139         enum exec_status sts;
2140         struct hisi_sas_complete_v3_hdr *complete_queue =
2141                         hisi_hba->complete_hdr[slot->cmplt_queue];
2142         struct hisi_sas_complete_v3_hdr *complete_hdr =
2143                         &complete_queue[slot->cmplt_queue_slot];
2144         unsigned long flags;
2145         bool is_internal = slot->is_internal;
2146         u32 dw0, dw1, dw3;
2147
2148         if (unlikely(!task || !task->lldd_task || !task->dev))
2149                 return -EINVAL;
2150
2151         ts = &task->task_status;
2152         device = task->dev;
2153         ha = device->port->ha;
2154         sas_dev = device->lldd_dev;
2155
2156         spin_lock_irqsave(&task->task_state_lock, flags);
2157         task->task_state_flags &=
2158                 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
2159         spin_unlock_irqrestore(&task->task_state_lock, flags);
2160
2161         memset(ts, 0, sizeof(*ts));
2162         ts->resp = SAS_TASK_COMPLETE;
2163
2164         if (unlikely(!sas_dev)) {
2165                 dev_dbg(dev, "slot complete: port has no device\n");
2166                 ts->stat = SAS_PHY_DOWN;
2167                 goto out;
2168         }
2169
2170         dw0 = le32_to_cpu(complete_hdr->dw0);
2171         dw1 = le32_to_cpu(complete_hdr->dw1);
2172         dw3 = le32_to_cpu(complete_hdr->dw3);
2173
2174         /*
2175          * Use SAS+TMF status codes
2176          */
2177         switch ((dw0 & CMPLT_HDR_ABORT_STAT_MSK) >> CMPLT_HDR_ABORT_STAT_OFF) {
2178         case STAT_IO_ABORTED:
2179                 /* this IO has been aborted by abort command */
2180                 ts->stat = SAS_ABORTED_TASK;
2181                 goto out;
2182         case STAT_IO_COMPLETE:
2183                 /* internal abort command complete */
2184                 ts->stat = TMF_RESP_FUNC_SUCC;
2185                 goto out;
2186         case STAT_IO_NO_DEVICE:
2187                 ts->stat = TMF_RESP_FUNC_COMPLETE;
2188                 goto out;
2189         case STAT_IO_NOT_VALID:
2190                 /*
2191                  * abort single IO, the controller can't find the IO
2192                  */
2193                 ts->stat = TMF_RESP_FUNC_FAILED;
2194                 goto out;
2195         default:
2196                 break;
2197         }
2198
2199         /* check for erroneous completion */
2200         if ((dw0 & CMPLT_HDR_CMPLT_MSK) == 0x3) {
2201                 u32 *error_info = hisi_sas_status_buf_addr_mem(slot);
2202
2203                 slot_err_v3_hw(hisi_hba, task, slot);
2204                 if (ts->stat != SAS_DATA_UNDERRUN)
2205                         dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n",
2206                                  slot->idx, task, sas_dev->device_id,
2207                                  dw0, dw1, complete_hdr->act, dw3,
2208                                  error_info[0], error_info[1],
2209                                  error_info[2], error_info[3]);
2210                 if (unlikely(slot->abort))
2211                         return ts->stat;
2212                 goto out;
2213         }
2214
2215         switch (task->task_proto) {
2216         case SAS_PROTOCOL_SSP: {
2217                 struct ssp_response_iu *iu =
2218                         hisi_sas_status_buf_addr_mem(slot) +
2219                         sizeof(struct hisi_sas_err_record);
2220
2221                 sas_ssp_task_response(dev, task, iu);
2222                 break;
2223         }
2224         case SAS_PROTOCOL_SMP: {
2225                 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
2226                 void *to;
2227
2228                 ts->stat = SAM_STAT_GOOD;
2229                 to = kmap_atomic(sg_page(sg_resp));
2230
2231                 dma_unmap_sg(dev, &task->smp_task.smp_resp, 1,
2232                              DMA_FROM_DEVICE);
2233                 dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
2234                              DMA_TO_DEVICE);
2235                 memcpy(to + sg_resp->offset,
2236                         hisi_sas_status_buf_addr_mem(slot) +
2237                        sizeof(struct hisi_sas_err_record),
2238                        sg_dma_len(sg_resp));
2239                 kunmap_atomic(to);
2240                 break;
2241         }
2242         case SAS_PROTOCOL_SATA:
2243         case SAS_PROTOCOL_STP:
2244         case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
2245                 ts->stat = SAM_STAT_GOOD;
2246                 hisi_sas_sata_done(task, slot);
2247                 break;
2248         default:
2249                 ts->stat = SAM_STAT_CHECK_CONDITION;
2250                 break;
2251         }
2252
2253         if (!slot->port->port_attached) {
2254                 dev_warn(dev, "slot complete: port %d has been removed\n",
2255                         slot->port->sas_port.id);
2256                 ts->stat = SAS_PHY_DOWN;
2257         }
2258
2259 out:
2260         sts = ts->stat;
2261         spin_lock_irqsave(&task->task_state_lock, flags);
2262         if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
2263                 spin_unlock_irqrestore(&task->task_state_lock, flags);
2264                 dev_info(dev, "slot complete: task(%p) aborted\n", task);
2265                 return SAS_ABORTED_TASK;
2266         }
2267         task->task_state_flags |= SAS_TASK_STATE_DONE;
2268         spin_unlock_irqrestore(&task->task_state_lock, flags);
2269         hisi_sas_slot_task_free(hisi_hba, task, slot);
2270
2271         if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
2272                 spin_lock_irqsave(&device->done_lock, flags);
2273                 if (test_bit(SAS_HA_FROZEN, &ha->state)) {
2274                         spin_unlock_irqrestore(&device->done_lock, flags);
2275                         dev_info(dev, "slot complete: task(%p) ignored\n",
2276                                  task);
2277                         return sts;
2278                 }
2279                 spin_unlock_irqrestore(&device->done_lock, flags);
2280         }
2281
2282         if (task->task_done)
2283                 task->task_done(task);
2284
2285         return sts;
2286 }
2287
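/*
 * Completion-queue tasklet: walk the CQ from the cached read pointer to the
 * hardware write pointer, complete each valid IPTT and write the new read
 * pointer back to the controller.
 */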
2288 static void cq_tasklet_v3_hw(unsigned long val)
2289 {
2290         struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val;
2291         struct hisi_hba *hisi_hba = cq->hisi_hba;
2292         struct hisi_sas_slot *slot;
2293         struct hisi_sas_complete_v3_hdr *complete_queue;
2294         u32 rd_point = cq->rd_point, wr_point;
2295         int queue = cq->id;
2296
2297         complete_queue = hisi_hba->complete_hdr[queue];
2298
2299         wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
2300                                    (0x14 * queue));
2301
2302         while (rd_point != wr_point) {
2303                 struct hisi_sas_complete_v3_hdr *complete_hdr;
2304                 struct device *dev = hisi_hba->dev;
2305                 u32 dw1;
2306                 int iptt;
2307
2308                 complete_hdr = &complete_queue[rd_point];
2309                 dw1 = le32_to_cpu(complete_hdr->dw1);
2310
2311                 iptt = dw1 & CMPLT_HDR_IPTT_MSK;
2312                 if (likely(iptt < HISI_SAS_COMMAND_ENTRIES_V3_HW)) {
2313                         slot = &hisi_hba->slot_info[iptt];
2314                         slot->cmplt_queue_slot = rd_point;
2315                         slot->cmplt_queue = queue;
2316                         slot_complete_v3_hw(hisi_hba, slot);
2317                 } else
2318                         dev_err(dev, "IPTT %d is invalid, discard it.\n", iptt);
2319
2320                 if (++rd_point >= HISI_SAS_QUEUE_SLOTS)
2321                         rd_point = 0;
2322         }
2323
2324         /* update rd_point */
2325         cq->rd_point = rd_point;
2326         hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
2327 }
2328
2329 static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p)
2330 {
2331         struct hisi_sas_cq *cq = p;
2332         struct hisi_hba *hisi_hba = cq->hisi_hba;
2333         int queue = cq->id;
2334
2335         hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue);
2336
2337         tasklet_schedule(&cq->tasklet);
2338
2339         return IRQ_HANDLED;
2340 }
2341
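/*
 * Build the CPU-to-completion-queue reply map from the MSI affinity masks,
 * falling back to a simple modulo mapping when no affinity info is
 * available.
 */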
2342 static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs)
2343 {
2344         const struct cpumask *mask;
2345         int queue, cpu;
2346
2347         for (queue = 0; queue < nvecs; queue++) {
2348                 struct hisi_sas_cq *cq = &hisi_hba->cq[queue];
2349
2350                 mask = pci_irq_get_affinity(hisi_hba->pci_dev, queue +
2351                                             BASE_VECTORS_V3_HW);
2352                 if (!mask)
2353                         goto fallback;
2354                 cq->pci_irq_mask = mask;
2355                 for_each_cpu(cpu, mask)
2356                         hisi_hba->reply_map[cpu] = queue;
2357         }
2358         return;
2359
2360 fallback:
2361         for_each_possible_cpu(cpu)
2362                 hisi_hba->reply_map[cpu] = cpu % hisi_hba->queue_count;
2363         /* Don't clean all CQ masks */
2364 }
2365
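/*
 * Allocate MSI vectors (optionally with managed affinity), then request the
 * phy, channel and fatal interrupts plus one interrupt and tasklet per
 * completion queue.
 */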
2366 static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
2367 {
2368         struct device *dev = hisi_hba->dev;
2369         struct pci_dev *pdev = hisi_hba->pci_dev;
2370         int vectors, rc;
2371         int i, k;
2372         int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi;
2373
2374         if (auto_affine_msi_experimental) {
2375                 struct irq_affinity desc = {
2376                         .pre_vectors = BASE_VECTORS_V3_HW,
2377                 };
2378
2379                 min_msi = MIN_AFFINE_VECTORS_V3_HW;
2380
2381                 hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids,
2382                                                    sizeof(unsigned int),
2383                                                    GFP_KERNEL);
2384                 if (!hisi_hba->reply_map)
2385                         return -ENOMEM;
2386                 vectors = pci_alloc_irq_vectors_affinity(hisi_hba->pci_dev,
2387                                                          min_msi, max_msi,
2388                                                          PCI_IRQ_MSI |
2389                                                          PCI_IRQ_AFFINITY,
2390                                                          &desc);
2391                 if (vectors < 0)
2392                         return -ENOENT;
2393                 setup_reply_map_v3_hw(hisi_hba, vectors - BASE_VECTORS_V3_HW);
2394         } else {
2395                 min_msi = max_msi;
2396                 vectors = pci_alloc_irq_vectors(hisi_hba->pci_dev, min_msi,
2397                                                 max_msi, PCI_IRQ_MSI);
2398                 if (vectors < 0)
2399                         return vectors;
2400         }
2401
2402         hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
2403
2404         rc = devm_request_irq(dev, pci_irq_vector(pdev, 1),
2405                               int_phy_up_down_bcast_v3_hw, 0,
2406                               DRV_NAME " phy", hisi_hba);
2407         if (rc) {
2408                 dev_err(dev, "could not request phy interrupt, rc=%d\n", rc);
2409                 rc = -ENOENT;
2410                 goto free_irq_vectors;
2411         }
2412
2413         rc = devm_request_irq(dev, pci_irq_vector(pdev, 2),
2414                               int_chnl_int_v3_hw, 0,
2415                               DRV_NAME " channel", hisi_hba);
2416         if (rc) {
2417                 dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc);
2418                 rc = -ENOENT;
2419                 goto free_phy_irq;
2420         }
2421
2422         rc = devm_request_irq(dev, pci_irq_vector(pdev, 11),
2423                               fatal_axi_int_v3_hw, 0,
2424                               DRV_NAME " fatal", hisi_hba);
2425         if (rc) {
2426                 dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc);
2427                 rc = -ENOENT;
2428                 goto free_chnl_interrupt;
2429         }
2430
2431         /* Init tasklets for cq only */
2432         for (i = 0; i < hisi_hba->cq_nvecs; i++) {
2433                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
2434                 struct tasklet_struct *t = &cq->tasklet;
2435                 int nr = hisi_sas_intr_conv ? 16 : 16 + i;
2436                 unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0;
2437
2438                 rc = devm_request_irq(dev, pci_irq_vector(pdev, nr),
2439                                       cq_interrupt_v3_hw, irqflags,
2440                                       DRV_NAME " cq", cq);
2441                 if (rc) {
2442                         dev_err(dev, "could not request cq%d interrupt, rc=%d\n",
2443                                 i, rc);
2444                         rc = -ENOENT;
2445                         goto free_cq_irqs;
2446                 }
2447
2448                 tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq);
2449         }
2450
2451         return 0;
2452
2453 free_cq_irqs:
2454         for (k = 0; k < i; k++) {
2455                 struct hisi_sas_cq *cq = &hisi_hba->cq[k];
2456                 int nr = hisi_sas_intr_conv ? 16 : 16 + k;
2457
2458                 free_irq(pci_irq_vector(pdev, nr), cq);
2459         }
2460         free_irq(pci_irq_vector(pdev, 11), hisi_hba);
2461 free_chnl_interrupt:
2462         free_irq(pci_irq_vector(pdev, 2), hisi_hba);
2463 free_phy_irq:
2464         free_irq(pci_irq_vector(pdev, 1), hisi_hba);
2465 free_irq_vectors:
2466         pci_free_irq_vectors(pdev);
2467         return rc;
2468 }
2469
2470 static int hisi_sas_v3_init(struct hisi_hba *hisi_hba)
2471 {
2472         int rc;
2473
2474         rc = hw_init_v3_hw(hisi_hba);
2475         if (rc)
2476                 return rc;
2477
2478         rc = interrupt_init_v3_hw(hisi_hba);
2479         if (rc)
2480                 return rc;
2481
2482         return 0;
2483 }
2484
2485 static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
2486                 struct sas_phy_linkrates *r)
2487 {
2488         enum sas_linkrate max = r->maximum_linkrate;
2489         u32 prog_phy_link_rate = 0x800;
2490
2491         prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
2492         hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
2493                              prog_phy_link_rate);
2494 }
2495
2496 static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
2497 {
2498         struct pci_dev *pdev = hisi_hba->pci_dev;
2499         int i;
2500
2501         synchronize_irq(pci_irq_vector(pdev, 1));
2502         synchronize_irq(pci_irq_vector(pdev, 2));
2503         synchronize_irq(pci_irq_vector(pdev, 11));
2504         for (i = 0; i < hisi_hba->queue_count; i++) {
2505                 hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
2506                 synchronize_irq(pci_irq_vector(pdev, i + 16));
2507         }
2508
2509         hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
2510         hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
2511         hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffffffff);
2512         hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff);
2513
2514         for (i = 0; i < hisi_hba->n_phy; i++) {
2515                 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1_MSK, 0xffffffff);
2516                 hisi_sas_phy_write32(hisi_hba, i, CHL_INT2_MSK, 0xffffffff);
2517                 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_NOT_RDY_MSK, 0x1);
2518                 hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_PHY_ENA_MSK, 0x1);
2519                 hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x1);
2520         }
2521 }
2522
2523 static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
2524 {
2525         return hisi_sas_read32(hisi_hba, PHY_STATE);
2526 }
2527
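/*
 * Quiesce the controller: mask and flush all interrupts, stop the delivery
 * queues and PHYs, then request an AXI master shutdown and wait for the bus
 * to go idle.
 */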
2528 static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
2529 {
2530         struct device *dev = hisi_hba->dev;
2531         u32 status, reg_val;
2532         int rc;
2533
2534         interrupt_disable_v3_hw(hisi_hba);
2535         hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
2536         hisi_sas_kill_tasklets(hisi_hba);
2537
2538         hisi_sas_stop_phys(hisi_hba);
2539
2540         mdelay(10);
2541
2542         reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
2543                                   AM_CTRL_GLOBAL);
2544         reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
2545         hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
2546                          AM_CTRL_GLOBAL, reg_val);
2547
2548         /* wait until bus idle */
2549         rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
2550                                           AM_CURR_TRANS_RETURN, status,
2551                                           status == 0x3, 10, 100);
2552         if (rc) {
2553                 dev_err(dev, "axi bus is not idle, rc=%d\n", rc);
2554                 return rc;
2555         }
2556
2557         return 0;
2558 }
2559
2560 static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
2561 {
2562         struct device *dev = hisi_hba->dev;
2563         int rc;
2564
2565         rc = disable_host_v3_hw(hisi_hba);
2566         if (rc) {
2567                 dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
2568                 return rc;
2569         }
2570
2571         hisi_sas_init_mem(hisi_hba);
2572
2573         return hw_init_v3_hw(hisi_hba);
2574 }
2575
2576 static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type,
2577                         u8 reg_index, u8 reg_count, u8 *write_data)
2578 {
2579         struct device *dev = hisi_hba->dev;
2580         u32 *data = (u32 *)write_data;
2581         int i;
2582
2583         switch (reg_type) {
2584         case SAS_GPIO_REG_TX:
2585                 if ((reg_index + reg_count) > ((hisi_hba->n_phy + 3) / 4)) {
2586                         dev_err(dev, "write gpio: invalid reg range[%d, %d]\n",
2587                                 reg_index, reg_index + reg_count - 1);
2588                         return -EINVAL;
2589                 }
2590
2591                 for (i = 0; i < reg_count; i++)
2592                         hisi_sas_write32(hisi_hba,
2593                                          SAS_GPIO_TX_0_1 + (reg_index + i) * 4,
2594                                          data[i]);
2595                 break;
2596         default:
2597                 dev_err(dev, "write gpio: unsupported or bad reg type %d\n",
2598                         reg_type);
2599                 return -EINVAL;
2600         }
2601
2602         return 0;
2603 }
2604
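/*
 * Poll CQE_SEND_CNT every delay_ms and treat the controller as drained
 * once the count stops changing between two polls; return -ETIMEDOUT if
 * it is still moving after timeout_ms.
 */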
2605 static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba,
2606                                             int delay_ms, int timeout_ms)
2607 {
2608         struct device *dev = hisi_hba->dev;
2609         int entries, entries_old = 0, time;
2610
2611         for (time = 0; time < timeout_ms; time += delay_ms) {
2612                 entries = hisi_sas_read32(hisi_hba, CQE_SEND_CNT);
2613                 if (entries == entries_old)
2614                         break;
2615
2616                 entries_old = entries;
2617                 msleep(delay_ms);
2618         }
2619
2620         if (time >= timeout_ms)
2621                 return -ETIMEDOUT;
2622
2623         dev_dbg(dev, "commands completed after %dms\n", time);
2624
2625         return 0;
2626 }
2627
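/*
 * Read-only sysfs attribute reporting whether completion queue interrupt
 * convergence (the intr_conv module parameter) was enabled at load time.
 */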
2628 static ssize_t intr_conv_v3_hw_show(struct device *dev,
2629                                     struct device_attribute *attr, char *buf)
2630 {
2631         return scnprintf(buf, PAGE_SIZE, "%u\n", hisi_sas_intr_conv);
2632 }
2633 static DEVICE_ATTR_RO(intr_conv_v3_hw);
2634
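/*
 * Program the interrupt coalescing registers. A zero ticks or count
 * disables coalescing (INT_COAL_EN = 0x1); otherwise coalescing is
 * enabled (0x3) with the requested time and count thresholds. The phys
 * are stopped around the update and restarted afterwards.
 */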
2635 static void config_intr_coal_v3_hw(struct hisi_hba *hisi_hba)
2636 {
2637         /* these registers must be configured while the phys are disabled */
2638         hisi_sas_stop_phys(hisi_hba);
2639
2640         if (hisi_hba->intr_coal_ticks == 0 ||
2641             hisi_hba->intr_coal_count == 0) {
2642                 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
2643                 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
2644                 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
2645         } else {
2646                 hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x3);
2647                 hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME,
2648                                  hisi_hba->intr_coal_ticks);
2649                 hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT,
2650                                  hisi_hba->intr_coal_count);
2651         }
2652         phys_init_v3_hw(hisi_hba);
2653 }
2654
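/*
 * Interrupt coalescing tunables, exposed as SCSI host attributes, e.g.
 * /sys/class/scsi_host/host<N>/intr_coal_ticks_v3_hw (the host number
 * depends on the system). Ticks must stay below 2^24 and the count
 * below 2^8.
 */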
2655 static ssize_t intr_coal_ticks_v3_hw_show(struct device *dev,
2656                                           struct device_attribute *attr,
2657                                           char *buf)
2658 {
2659         struct Scsi_Host *shost = class_to_shost(dev);
2660         struct hisi_hba *hisi_hba = shost_priv(shost);
2661
2662         return scnprintf(buf, PAGE_SIZE, "%u\n",
2663                          hisi_hba->intr_coal_ticks);
2664 }
2665
2666 static ssize_t intr_coal_ticks_v3_hw_store(struct device *dev,
2667                                            struct device_attribute *attr,
2668                                            const char *buf, size_t count)
2669 {
2670         struct Scsi_Host *shost = class_to_shost(dev);
2671         struct hisi_hba *hisi_hba = shost_priv(shost);
2672         u32 intr_coal_ticks;
2673         int ret;
2674
2675         ret = kstrtou32(buf, 10, &intr_coal_ticks);
2676         if (ret) {
2677                 dev_err(dev, "invalid input for intr_coal_ticks\n");
2678                 return -EINVAL;
2679         }
2680
2681         if (intr_coal_ticks >= BIT(24)) {
2682                 dev_err(dev, "intr_coal_ticks must be less than 2^24!\n");
2683                 return -EINVAL;
2684         }
2685
2686         hisi_hba->intr_coal_ticks = intr_coal_ticks;
2687
2688         config_intr_coal_v3_hw(hisi_hba);
2689
2690         return count;
2691 }
2692 static DEVICE_ATTR_RW(intr_coal_ticks_v3_hw);
2693
2694 static ssize_t intr_coal_count_v3_hw_show(struct device *dev,
2695                                           struct device_attribute
2696                                           *attr, char *buf)
2697 {
2698         struct Scsi_Host *shost = class_to_shost(dev);
2699         struct hisi_hba *hisi_hba = shost_priv(shost);
2700
2701         return scnprintf(buf, PAGE_SIZE, "%u\n",
2702                          hisi_hba->intr_coal_count);
2703 }
2704
2705 static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
2706                 struct device_attribute
2707                 *attr, const char *buf, size_t count)
2708 {
2709         struct Scsi_Host *shost = class_to_shost(dev);
2710         struct hisi_hba *hisi_hba = shost_priv(shost);
2711         u32 intr_coal_count;
2712         int ret;
2713
2714         ret = kstrtou32(buf, 10, &intr_coal_count);
2715         if (ret) {
2716                 dev_err(dev, "invalid input for intr_coal_count\n");
2717                 return -EINVAL;
2718         }
2719
2720         if (intr_coal_count >= BIT(8)) {
2721                 dev_err(dev, "intr_coal_count must be less than 2^8!\n");
2722                 return -EINVAL;
2723         }
2724
2725         hisi_hba->intr_coal_count = intr_coal_count;
2726
2727         config_intr_coal_v3_hw(hisi_hba);
2728
2729         return count;
2730 }
2731 static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
2732
2733 static struct device_attribute *host_attrs_v3_hw[] = {
2734         &dev_attr_phy_event_threshold,
2735         &dev_attr_intr_conv_v3_hw,
2736         &dev_attr_intr_coal_ticks_v3_hw,
2737         &dev_attr_intr_coal_count_v3_hw,
2738         NULL
2739 };
2740
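/*
 * Per-port (per-phy) registers dumped through debugfs; offsets are
 * relative to PORT_BASE (see debugfs_port_reg below).
 */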
2741 static const struct hisi_sas_debugfs_reg_lu debugfs_port_reg_lu[] = {
2742         HISI_SAS_DEBUGFS_REG(PHY_CFG),
2743         HISI_SAS_DEBUGFS_REG(HARD_PHY_LINKRATE),
2744         HISI_SAS_DEBUGFS_REG(PROG_PHY_LINK_RATE),
2745         HISI_SAS_DEBUGFS_REG(PHY_CTRL),
2746         HISI_SAS_DEBUGFS_REG(SL_CFG),
2747         HISI_SAS_DEBUGFS_REG(AIP_LIMIT),
2748         HISI_SAS_DEBUGFS_REG(SL_CONTROL),
2749         HISI_SAS_DEBUGFS_REG(RX_PRIMS_STATUS),
2750         HISI_SAS_DEBUGFS_REG(TX_ID_DWORD0),
2751         HISI_SAS_DEBUGFS_REG(TX_ID_DWORD1),
2752         HISI_SAS_DEBUGFS_REG(TX_ID_DWORD2),
2753         HISI_SAS_DEBUGFS_REG(TX_ID_DWORD3),
2754         HISI_SAS_DEBUGFS_REG(TX_ID_DWORD4),
2755         HISI_SAS_DEBUGFS_REG(TX_ID_DWORD5),
2756         HISI_SAS_DEBUGFS_REG(TX_ID_DWORD6),
2757         HISI_SAS_DEBUGFS_REG(TXID_AUTO),
2758         HISI_SAS_DEBUGFS_REG(RX_IDAF_DWORD0),
2759         HISI_SAS_DEBUGFS_REG(RXOP_CHECK_CFG_H),
2760         HISI_SAS_DEBUGFS_REG(STP_LINK_TIMER),
2761         HISI_SAS_DEBUGFS_REG(STP_LINK_TIMEOUT_STATE),
2762         HISI_SAS_DEBUGFS_REG(CON_CFG_DRIVER),
2763         HISI_SAS_DEBUGFS_REG(SAS_SSP_CON_TIMER_CFG),
2764         HISI_SAS_DEBUGFS_REG(SAS_SMP_CON_TIMER_CFG),
2765         HISI_SAS_DEBUGFS_REG(SAS_STP_CON_TIMER_CFG),
2766         HISI_SAS_DEBUGFS_REG(CHL_INT0),
2767         HISI_SAS_DEBUGFS_REG(CHL_INT1),
2768         HISI_SAS_DEBUGFS_REG(CHL_INT2),
2769         HISI_SAS_DEBUGFS_REG(CHL_INT0_MSK),
2770         HISI_SAS_DEBUGFS_REG(CHL_INT1_MSK),
2771         HISI_SAS_DEBUGFS_REG(CHL_INT2_MSK),
2772         HISI_SAS_DEBUGFS_REG(SAS_EC_INT_COAL_TIME),
2773         HISI_SAS_DEBUGFS_REG(CHL_INT_COAL_EN),
2774         HISI_SAS_DEBUGFS_REG(SAS_RX_TRAIN_TIMER),
2775         HISI_SAS_DEBUGFS_REG(PHY_CTRL_RDY_MSK),
2776         HISI_SAS_DEBUGFS_REG(PHYCTRL_NOT_RDY_MSK),
2777         HISI_SAS_DEBUGFS_REG(PHYCTRL_DWS_RESET_MSK),
2778         HISI_SAS_DEBUGFS_REG(PHYCTRL_PHY_ENA_MSK),
2779         HISI_SAS_DEBUGFS_REG(SL_RX_BCAST_CHK_MSK),
2780         HISI_SAS_DEBUGFS_REG(PHYCTRL_OOB_RESTART_MSK),
2781         HISI_SAS_DEBUGFS_REG(DMA_TX_STATUS),
2782         HISI_SAS_DEBUGFS_REG(DMA_RX_STATUS),
2783         HISI_SAS_DEBUGFS_REG(COARSETUNE_TIME),
2784         HISI_SAS_DEBUGFS_REG(ERR_CNT_DWS_LOST),
2785         HISI_SAS_DEBUGFS_REG(ERR_CNT_RESET_PROB),
2786         HISI_SAS_DEBUGFS_REG(ERR_CNT_INVLD_DW),
2787         HISI_SAS_DEBUGFS_REG(ERR_CNT_CODE_ERR),
2788         HISI_SAS_DEBUGFS_REG(ERR_CNT_DISP_ERR),
2789         {}
2790 };
2791
2792 static const struct hisi_sas_debugfs_reg debugfs_port_reg = {
2793         .lu = debugfs_port_reg_lu,
2794         .count = 0x100,
2795         .base_off = PORT_BASE,
2796         .read_port_reg = hisi_sas_phy_read32,
2797 };
2798
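/* Global registers dumped through debugfs. */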
2799 static const struct hisi_sas_debugfs_reg_lu debugfs_global_reg_lu[] = {
2800         HISI_SAS_DEBUGFS_REG(DLVRY_QUEUE_ENABLE),
2801         HISI_SAS_DEBUGFS_REG(PHY_CONTEXT),
2802         HISI_SAS_DEBUGFS_REG(PHY_STATE),
2803         HISI_SAS_DEBUGFS_REG(PHY_PORT_NUM_MA),
2804         HISI_SAS_DEBUGFS_REG(PHY_CONN_RATE),
2805         HISI_SAS_DEBUGFS_REG(ITCT_CLR),
2806         HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_LO),
2807         HISI_SAS_DEBUGFS_REG(IO_SATA_BROKEN_MSG_ADDR_HI),
2808         HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_LO),
2809         HISI_SAS_DEBUGFS_REG(SATA_INITI_D2H_STORE_ADDR_HI),
2810         HISI_SAS_DEBUGFS_REG(CFG_MAX_TAG),
2811         HISI_SAS_DEBUGFS_REG(HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL),
2812         HISI_SAS_DEBUGFS_REG(HGC_SAS_TXFAIL_RETRY_CTRL),
2813         HISI_SAS_DEBUGFS_REG(HGC_GET_ITV_TIME),
2814         HISI_SAS_DEBUGFS_REG(DEVICE_MSG_WORK_MODE),
2815         HISI_SAS_DEBUGFS_REG(OPENA_WT_CONTI_TIME),
2816         HISI_SAS_DEBUGFS_REG(I_T_NEXUS_LOSS_TIME),
2817         HISI_SAS_DEBUGFS_REG(MAX_CON_TIME_LIMIT_TIME),
2818         HISI_SAS_DEBUGFS_REG(BUS_INACTIVE_LIMIT_TIME),
2819         HISI_SAS_DEBUGFS_REG(REJECT_TO_OPEN_LIMIT_TIME),
2820         HISI_SAS_DEBUGFS_REG(CQ_INT_CONVERGE_EN),
2821         HISI_SAS_DEBUGFS_REG(CFG_AGING_TIME),
2822         HISI_SAS_DEBUGFS_REG(HGC_DFX_CFG2),
2823         HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_QUERY_IPTT),
2824         HISI_SAS_DEBUGFS_REG(CFG_ABT_SET_IPTT_DONE),
2825         HISI_SAS_DEBUGFS_REG(HGC_IOMB_PROC1_STATUS),
2826         HISI_SAS_DEBUGFS_REG(CHNL_INT_STATUS),
2827         HISI_SAS_DEBUGFS_REG(HGC_AXI_FIFO_ERR_INFO),
2828         HISI_SAS_DEBUGFS_REG(INT_COAL_EN),
2829         HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_TIME),
2830         HISI_SAS_DEBUGFS_REG(OQ_INT_COAL_CNT),
2831         HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_TIME),
2832         HISI_SAS_DEBUGFS_REG(ENT_INT_COAL_CNT),
2833         HISI_SAS_DEBUGFS_REG(OQ_INT_SRC),
2834         HISI_SAS_DEBUGFS_REG(OQ_INT_SRC_MSK),
2835         HISI_SAS_DEBUGFS_REG(ENT_INT_SRC1),
2836         HISI_SAS_DEBUGFS_REG(ENT_INT_SRC2),
2837         HISI_SAS_DEBUGFS_REG(ENT_INT_SRC3),
2838         HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK1),
2839         HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK2),
2840         HISI_SAS_DEBUGFS_REG(ENT_INT_SRC_MSK3),
2841         HISI_SAS_DEBUGFS_REG(CHNL_PHYUPDOWN_INT_MSK),
2842         HISI_SAS_DEBUGFS_REG(CHNL_ENT_INT_MSK),
2843         HISI_SAS_DEBUGFS_REG(HGC_COM_INT_MSK),
2844         HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR),
2845         HISI_SAS_DEBUGFS_REG(SAS_ECC_INTR_MSK),
2846         HISI_SAS_DEBUGFS_REG(HGC_ERR_STAT_EN),
2847         HISI_SAS_DEBUGFS_REG(CQE_SEND_CNT),
2848         HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_DEPTH),
2849         HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_WR_PTR),
2850         HISI_SAS_DEBUGFS_REG(DLVRY_Q_0_RD_PTR),
2851         HISI_SAS_DEBUGFS_REG(HYPER_STREAM_ID_EN_CFG),
2852         HISI_SAS_DEBUGFS_REG(OQ0_INT_SRC_MSK),
2853         HISI_SAS_DEBUGFS_REG(COMPL_Q_0_DEPTH),
2854         HISI_SAS_DEBUGFS_REG(COMPL_Q_0_WR_PTR),
2855         HISI_SAS_DEBUGFS_REG(COMPL_Q_0_RD_PTR),
2856         HISI_SAS_DEBUGFS_REG(AWQOS_AWCACHE_CFG),
2857         HISI_SAS_DEBUGFS_REG(ARQOS_ARCACHE_CFG),
2858         HISI_SAS_DEBUGFS_REG(HILINK_ERR_DFX),
2859         HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_0),
2860         HISI_SAS_DEBUGFS_REG(SAS_GPIO_CFG_1),
2861         HISI_SAS_DEBUGFS_REG(SAS_GPIO_TX_0_1),
2862         HISI_SAS_DEBUGFS_REG(SAS_CFG_DRIVE_VLD),
2863         {}
2864 };
2865
2866 static const struct hisi_sas_debugfs_reg debugfs_global_reg = {
2867         .lu = debugfs_global_reg_lu,
2868         .count = 0x800,
2869         .read_global_reg = hisi_sas_read32,
2870 };
2871
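/*
 * Before taking a debugfs register snapshot: reject new commands,
 * disable the delivery queues and give in-flight commands up to 5
 * seconds to drain before killing the completion tasklets.
 */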
2872 static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba)
2873 {
2874         struct device *dev = hisi_hba->dev;
2875
2876         set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
2877
2878         hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0);
2879
2880         if (wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000) == -ETIMEDOUT)
2881                 dev_dbg(dev, "timed out waiting for commands to complete\n");
2882
2883         hisi_sas_kill_tasklets(hisi_hba);
2884 }
2885
2886 static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba)
2887 {
2888         hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
2889                          (u32)((1ULL << hisi_hba->queue_count) - 1));
2890
2891         clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
2892 }
2893
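/*
 * SCSI host template: mostly libsas entry points, plus the v3 hw sysfs
 * attributes, round-robin tag allocation and the host reset handler.
 */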
2894 static struct scsi_host_template sht_v3_hw = {
2895         .name                   = DRV_NAME,
2896         .module                 = THIS_MODULE,
2897         .queuecommand           = sas_queuecommand,
2898         .target_alloc           = sas_target_alloc,
2899         .slave_configure        = hisi_sas_slave_configure,
2900         .scan_finished          = hisi_sas_scan_finished,
2901         .scan_start             = hisi_sas_scan_start,
2902         .change_queue_depth     = sas_change_queue_depth,
2903         .bios_param             = sas_bios_param,
2904         .this_id                = -1,
2905         .sg_tablesize           = HISI_SAS_SGE_PAGE_CNT,
2906         .sg_prot_tablesize      = HISI_SAS_SGE_PAGE_CNT,
2907         .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
2908         .eh_device_reset_handler = sas_eh_device_reset_handler,
2909         .eh_target_reset_handler = sas_eh_target_reset_handler,
2910         .target_destroy         = sas_target_destroy,
2911         .ioctl                  = sas_ioctl,
2912         .shost_attrs            = host_attrs_v3_hw,
2913         .tag_alloc_policy       = BLK_TAG_ALLOC_RR,
2914         .host_reset             = hisi_sas_host_reset,
2915 };
2916
2917 static const struct hisi_sas_hw hisi_sas_v3_hw = {
2918         .hw_init = hisi_sas_v3_init,
2919         .setup_itct = setup_itct_v3_hw,
2920         .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW,
2921         .get_wideport_bitmap = get_wideport_bitmap_v3_hw,
2922         .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr),
2923         .clear_itct = clear_itct_v3_hw,
2924         .sl_notify_ssp = sl_notify_ssp_v3_hw,
2925         .prep_ssp = prep_ssp_v3_hw,
2926         .prep_smp = prep_smp_v3_hw,
2927         .prep_stp = prep_ata_v3_hw,
2928         .prep_abort = prep_abort_v3_hw,
2929         .get_free_slot = get_free_slot_v3_hw,
2930         .start_delivery = start_delivery_v3_hw,
2931         .slot_complete = slot_complete_v3_hw,
2932         .phys_init = phys_init_v3_hw,
2933         .phy_start = start_phy_v3_hw,
2934         .phy_disable = disable_phy_v3_hw,
2935         .phy_hard_reset = phy_hard_reset_v3_hw,
2936         .phy_get_max_linkrate = phy_get_max_linkrate_v3_hw,
2937         .phy_set_linkrate = phy_set_linkrate_v3_hw,
2938         .dereg_device = dereg_device_v3_hw,
2939         .soft_reset = soft_reset_v3_hw,
2940         .get_phys_state = get_phys_state_v3_hw,
2941         .get_events = phy_get_events_v3_hw,
2942         .write_gpio = write_gpio_v3_hw,
2943         .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw,
2944         .debugfs_reg_global = &debugfs_global_reg,
2945         .debugfs_reg_port = &debugfs_port_reg,
2946         .snapshot_prepare = debugfs_snapshot_prepare_v3_hw,
2947         .snapshot_restore = debugfs_snapshot_restore_v3_hw,
2948 };
2949
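/*
 * Allocate the Scsi_Host and its embedded hisi_hba, hook up the v3 hw
 * ops, read the firmware-described controller properties and allocate
 * the internal command and queue structures.
 */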
2950 static struct Scsi_Host *
2951 hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
2952 {
2953         struct Scsi_Host *shost;
2954         struct hisi_hba *hisi_hba;
2955         struct device *dev = &pdev->dev;
2956
2957         shost = scsi_host_alloc(&sht_v3_hw, sizeof(*hisi_hba));
2958         if (!shost) {
2959                 dev_err(dev, "shost alloc failed\n");
2960                 return NULL;
2961         }
2962         hisi_hba = shost_priv(shost);
2963
2964         INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
2965         INIT_WORK(&hisi_hba->debugfs_work, hisi_sas_debugfs_work_handler);
2966         hisi_hba->hw = &hisi_sas_v3_hw;
2967         hisi_hba->pci_dev = pdev;
2968         hisi_hba->dev = dev;
2969         hisi_hba->shost = shost;
2970         SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;
2971
2972         if (prot_mask & ~HISI_SAS_PROT_MASK)
2973                 dev_err(dev, "unsupported protection mask 0x%x, using default (0x0)\n",
2974                         prot_mask);
2975         else
2976                 hisi_hba->prot_mask = prot_mask;
2977
2978         timer_setup(&hisi_hba->timer, NULL, 0);
2979
2980         if (hisi_sas_get_fw_info(hisi_hba) < 0)
2981                 goto err_out;
2982
2983         if (hisi_sas_alloc(hisi_hba)) {
2984                 hisi_sas_free(hisi_hba);
2985                 goto err_out;
2986         }
2987
2988         return shost;
2989 err_out:
2990         scsi_host_put(shost);
2991         dev_err(dev, "shost init failed\n");
2992         return NULL;
2993 }
2994
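/*
 * PCI probe: enable the device, request its regions and set up 64-bit
 * (falling back to 32-bit) DMA masks, map the BAR 5 registers, register
 * the SCSI host and libsas HA, run the hardware init sequence and start
 * the scan.
 */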
2995 static int
2996 hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2997 {
2998         struct Scsi_Host *shost;
2999         struct hisi_hba *hisi_hba;
3000         struct device *dev = &pdev->dev;
3001         struct asd_sas_phy **arr_phy;
3002         struct asd_sas_port **arr_port;
3003         struct sas_ha_struct *sha;
3004         int rc, phy_nr, port_nr, i;
3005
3006         rc = pci_enable_device(pdev);
3007         if (rc)
3008                 goto err_out;
3009
3010         pci_set_master(pdev);
3011
3012         rc = pci_request_regions(pdev, DRV_NAME);
3013         if (rc)
3014                 goto err_out_disable_device;
3015
3016         rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3017         if (rc)
3018                 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3019         if (rc) {
3020                 dev_err(dev, "No usable DMA addressing method\n");
3021                 rc = -ENODEV;
3022                 goto err_out_regions;
3023         }
3024
3025         shost = hisi_sas_shost_alloc_pci(pdev);
3026         if (!shost) {
3027                 rc = -ENOMEM;
3028                 goto err_out_regions;
3029         }
3030
3031         sha = SHOST_TO_SAS_HA(shost);
3032         hisi_hba = shost_priv(shost);
3033         dev_set_drvdata(dev, sha);
3034
3035         hisi_hba->regs = pcim_iomap(pdev, 5, 0);
3036         if (!hisi_hba->regs) {
3037                 dev_err(dev, "cannot map registers\n");
3038                 rc = -ENOMEM;
3039                 goto err_out_ha;
3040         }
3041
3042         phy_nr = port_nr = hisi_hba->n_phy;
3043
3044         arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
3045         arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
3046         if (!arr_phy || !arr_port) {
3047                 rc = -ENOMEM;
3048                 goto err_out_ha;
3049         }
3050
3051         sha->sas_phy = arr_phy;
3052         sha->sas_port = arr_port;
3053         sha->core.shost = shost;
3054         sha->lldd_ha = hisi_hba;
3055
3056         shost->transportt = hisi_sas_stt;
3057         shost->max_id = HISI_SAS_MAX_DEVICES;
3058         shost->max_lun = ~0;
3059         shost->max_channel = 1;
3060         shost->max_cmd_len = 16;
3061         shost->can_queue = hisi_hba->hw->max_command_entries -
3062                 HISI_SAS_RESERVED_IPTT_CNT;
3063         shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
3064                 HISI_SAS_RESERVED_IPTT_CNT;
3065
3066         sha->sas_ha_name = DRV_NAME;
3067         sha->dev = dev;
3068         sha->lldd_module = THIS_MODULE;
3069         sha->sas_addr = &hisi_hba->sas_addr[0];
3070         sha->num_phys = hisi_hba->n_phy;
3071         sha->core.shost = hisi_hba->shost;
3072
3073         for (i = 0; i < hisi_hba->n_phy; i++) {
3074                 sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
3075                 sha->sas_port[i] = &hisi_hba->port[i].sas_port;
3076         }
3077
3078         if (hisi_hba->prot_mask) {
3079                 dev_info(dev, "Registering for DIF/DIX prot_mask=0x%x\n",
3080                          prot_mask);
3081                 scsi_host_set_prot(hisi_hba->shost, prot_mask);
3082                 if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
3083                         scsi_host_set_guard(hisi_hba->shost,
3084                                             SHOST_DIX_GUARD_CRC);
3085         }
3086
3087         if (hisi_sas_debugfs_enable)
3088                 hisi_sas_debugfs_init(hisi_hba);
3089
3090         rc = scsi_add_host(shost, dev);
3091         if (rc)
3092                 goto err_out_ha;
3093
3094         rc = sas_register_ha(sha);
3095         if (rc)
3096                 goto err_out_register_ha;
3097
3098         rc = hisi_hba->hw->hw_init(hisi_hba);
3099         if (rc)
3100                 goto err_out_register_ha;
3101
3102         scsi_scan_host(shost);
3103
3104         return 0;
3105
3106 err_out_register_ha:
3107         scsi_remove_host(shost);
3108 err_out_ha:
3109         scsi_host_put(shost);
3110 err_out_regions:
3111         pci_release_regions(pdev);
3112 err_out_disable_device:
3113         pci_disable_device(pdev);
3114 err_out:
3115         return rc;
3116 }
3117
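/*
 * Free the non-CQ event interrupt vectors (1, 2 and 11) and the per-CQ
 * vectors from 16 upwards (a single shared vector 16 when interrupt
 * convergence is enabled), then release the PCI IRQ vectors.
 */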
3118 static void
3119 hisi_sas_v3_destroy_irqs(struct pci_dev *pdev, struct hisi_hba *hisi_hba)
3120 {
3121         int i;
3122
3123         free_irq(pci_irq_vector(pdev, 1), hisi_hba);
3124         free_irq(pci_irq_vector(pdev, 2), hisi_hba);
3125         free_irq(pci_irq_vector(pdev, 11), hisi_hba);
3126         for (i = 0; i < hisi_hba->cq_nvecs; i++) {
3127                 struct hisi_sas_cq *cq = &hisi_hba->cq[i];
3128                 int nr = hisi_sas_intr_conv ? 16 : 16 + i;
3129
3130                 free_irq(pci_irq_vector(pdev, nr), cq);
3131         }
3132         pci_free_irq_vectors(pdev);
3133 }
3134
3135 static void hisi_sas_v3_remove(struct pci_dev *pdev)
3136 {
3137         struct device *dev = &pdev->dev;
3138         struct sas_ha_struct *sha = dev_get_drvdata(dev);
3139         struct hisi_hba *hisi_hba = sha->lldd_ha;
3140         struct Scsi_Host *shost = sha->core.shost;
3141
3142         hisi_sas_debugfs_exit(hisi_hba);
3143
3144         if (timer_pending(&hisi_hba->timer))
3145                 del_timer(&hisi_hba->timer);
3146
3147         sas_unregister_ha(sha);
3148         sas_remove_host(sha->core.shost);
3149
3150         hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
3151         hisi_sas_kill_tasklets(hisi_hba);
3152         pci_release_regions(pdev);
3153         pci_disable_device(pdev);
3154         hisi_sas_free(hisi_hba);
3155         scsi_host_put(shost);
3156 }
3157
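/*
 * PCI reset (e.g. FLR) hooks: quiesce and disable the host before the
 * reset, then re-initialise memory and hardware once it completes.
 */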
3158 static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
3159 {
3160         struct sas_ha_struct *sha = pci_get_drvdata(pdev);
3161         struct hisi_hba *hisi_hba = sha->lldd_ha;
3162         struct device *dev = hisi_hba->dev;
3163         int rc;
3164
3165         dev_info(dev, "FLR prepare\n");
3166         set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
3167         hisi_sas_controller_reset_prepare(hisi_hba);
3168
3169         rc = disable_host_v3_hw(hisi_hba);
3170         if (rc)
3171                 dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
3172 }
3173
3174 static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
3175 {
3176         struct sas_ha_struct *sha = pci_get_drvdata(pdev);
3177         struct hisi_hba *hisi_hba = sha->lldd_ha;
3178         struct device *dev = hisi_hba->dev;
3179         int rc;
3180
3181         hisi_sas_init_mem(hisi_hba);
3182
3183         rc = hw_init_v3_hw(hisi_hba);
3184         if (rc) {
3185                 dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
3186                 return;
3187         }
3188
3189         hisi_sas_controller_reset_done(hisi_hba);
3190         dev_info(dev, "FLR done\n");
3191 }
3192
3193 enum {
3194         /* instances of the controller */
3195         hip08,
3196 };
3197
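/*
 * Legacy PCI PM suspend: block and reject new requests, quiesce the
 * controller, save PCI state and enter the chosen device power state,
 * then let libsas suspend the HA.
 */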
3198 static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
3199 {
3200         struct sas_ha_struct *sha = pci_get_drvdata(pdev);
3201         struct hisi_hba *hisi_hba = sha->lldd_ha;
3202         struct device *dev = hisi_hba->dev;
3203         struct Scsi_Host *shost = hisi_hba->shost;
3204         pci_power_t device_state;
3205         int rc;
3206
3207         if (!pdev->pm_cap) {
3208                 dev_err(dev, "PCI PM not supported\n");
3209                 return -ENODEV;
3210         }
3211
3212         if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
3213                 return -1;
3214
3215         scsi_block_requests(shost);
3216         set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
3217         flush_workqueue(hisi_hba->wq);
3218
3219         rc = disable_host_v3_hw(hisi_hba);
3220         if (rc) {
3221                 dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
3222                 clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
3223                 clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
3224                 scsi_unblock_requests(shost);
3225                 return rc;
3226         }
3227
3228         hisi_sas_init_mem(hisi_hba);
3229
3230         device_state = pci_choose_state(pdev, state);
3231         dev_warn(dev, "entering power state [D%d]\n",
3232                  device_state);
3233         pci_save_state(pdev);
3234         pci_disable_device(pdev);
3235         pci_set_power_state(pdev, device_state);
3236
3237         hisi_sas_release_tasks(hisi_hba);
3238
3239         sas_suspend_ha(sha);
3240         return 0;
3241 }
3242
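/*
 * Resume: restore PCI state and bus mastering, unblock requests, then
 * re-run the register and phy initialisation and let libsas resume the
 * HA.
 */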
3243 static int hisi_sas_v3_resume(struct pci_dev *pdev)
3244 {
3245         struct sas_ha_struct *sha = pci_get_drvdata(pdev);
3246         struct hisi_hba *hisi_hba = sha->lldd_ha;
3247         struct Scsi_Host *shost = hisi_hba->shost;
3248         struct device *dev = hisi_hba->dev;
3249         int rc;
3250         pci_power_t device_state = pdev->current_state;
3251
3252         dev_warn(dev, "resuming from power state [D%d]\n",
3253                  device_state);
3254         pci_set_power_state(pdev, PCI_D0);
3255         pci_enable_wake(pdev, PCI_D0, 0);
3256         pci_restore_state(pdev);
3257         rc = pci_enable_device(pdev);
3258         if (rc)
3259                 dev_err(dev, "enable device failed during resume (%d)\n", rc);
3260
3261         pci_set_master(pdev);
3262         scsi_unblock_requests(shost);
3263         clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
3264
3265         sas_prep_resume_ha(sha);
3266         init_reg_v3_hw(hisi_hba);
3267         hisi_hba->hw->phys_init(hisi_hba);
3268         sas_resume_ha(sha);
3269         clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
3270
3271         return 0;
3272 }
3273
3274 static const struct pci_device_id sas_v3_pci_table[] = {
3275         { PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
3276         {}
3277 };
3278 MODULE_DEVICE_TABLE(pci, sas_v3_pci_table);
3279
3280 static const struct pci_error_handlers hisi_sas_err_handler = {
3281         .reset_prepare  = hisi_sas_reset_prepare_v3_hw,
3282         .reset_done     = hisi_sas_reset_done_v3_hw,
3283 };
3284
3285 static struct pci_driver sas_v3_pci_driver = {
3286         .name           = DRV_NAME,
3287         .id_table       = sas_v3_pci_table,
3288         .probe          = hisi_sas_v3_probe,
3289         .remove         = hisi_sas_v3_remove,
3290         .suspend        = hisi_sas_v3_suspend,
3291         .resume         = hisi_sas_v3_resume,
3292         .err_handler    = &hisi_sas_err_handler,
3293 };
3294
3295 module_pci_driver(sas_v3_pci_driver);
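/*
 * intr_conv: when set at load time, the completion queues converge on a
 * single interrupt vector (see CQ_INT_CONVERGE_EN); read-only at runtime.
 */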
3296 module_param_named(intr_conv, hisi_sas_intr_conv, bool, 0444);
3297
3298 MODULE_LICENSE("GPL");
3299 MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
3300 MODULE_DESCRIPTION("HISILICON SAS controller v3 hw driver based on pci device");
3301 MODULE_ALIAS("pci:" DRV_NAME);