/*
 * Copyright (c) 2016 Linaro Ltd.
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
/* Driver name used for logging and device registration. */
#define DRV_NAME "hisi_sas_v2_hw"
/* global registers need init */
#define DLVRY_QUEUE_ENABLE		0x0
#define IOST_BASE_ADDR_LO		0x8
#define IOST_BASE_ADDR_HI		0xc
#define ITCT_BASE_ADDR_LO		0x10
#define ITCT_BASE_ADDR_HI		0x14
#define IO_BROKEN_MSG_ADDR_LO		0x18
#define IO_BROKEN_MSG_ADDR_HI		0x1c
#define PHY_CONTEXT			0x20
#define PHY_STATE			0x24
#define PHY_PORT_NUM_MA			0x28
#define PORT_STATE			0x2c
#define PORT_STATE_PHY8_PORT_NUM_OFF	16
#define PORT_STATE_PHY8_PORT_NUM_MSK	(0xf << PORT_STATE_PHY8_PORT_NUM_OFF)
#define PORT_STATE_PHY8_CONN_RATE_OFF	20
#define PORT_STATE_PHY8_CONN_RATE_MSK	(0xf << PORT_STATE_PHY8_CONN_RATE_OFF)
#define PHY_CONN_RATE			0x30
#define HGC_TRANS_TASK_CNT_LIMIT	0x38
#define AXI_AHB_CLK_CFG			0x3c
#define ITCT_CLR_EN_OFF			16
#define ITCT_CLR_EN_MSK			(0x1 << ITCT_CLR_EN_OFF)
#define ITCT_DEV_OFF			0
#define ITCT_DEV_MSK			(0x7ff << ITCT_DEV_OFF)
#define AXI_USER1			0x48
#define AXI_USER2			0x4c
#define IO_SATA_BROKEN_MSG_ADDR_LO	0x58
#define IO_SATA_BROKEN_MSG_ADDR_HI	0x5c
#define SATA_INITI_D2H_STORE_ADDR_LO	0x60
#define SATA_INITI_D2H_STORE_ADDR_HI	0x64
#define HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL	0x84
#define HGC_SAS_TXFAIL_RETRY_CTRL	0x88
#define HGC_GET_ITV_TIME		0x90
#define DEVICE_MSG_WORK_MODE		0x94
#define OPENA_WT_CONTI_TIME		0x9c
#define I_T_NEXUS_LOSS_TIME		0xa0
#define MAX_CON_TIME_LIMIT_TIME		0xa4
#define BUS_INACTIVE_LIMIT_TIME		0xa8
#define REJECT_TO_OPEN_LIMIT_TIME	0xac
#define CFG_AGING_TIME			0xbc
#define HGC_DFX_CFG2			0xc0
#define HGC_IOMB_PROC1_STATUS		0x104
#define CFG_1US_TIMER_TRSH		0xcc
#define HGC_INVLD_DQE_INFO		0x148
#define HGC_INVLD_DQE_INFO_FB_CH0_OFF	9
#define HGC_INVLD_DQE_INFO_FB_CH0_MSK	(0x1 << HGC_INVLD_DQE_INFO_FB_CH0_OFF)
#define HGC_INVLD_DQE_INFO_FB_CH3_OFF	18
#define INT_COAL_EN			0x19c
#define OQ_INT_COAL_TIME		0x1a0
#define OQ_INT_COAL_CNT			0x1a4
#define ENT_INT_COAL_TIME		0x1a8
#define ENT_INT_COAL_CNT		0x1ac
#define OQ_INT_SRC			0x1b0
#define OQ_INT_SRC_MSK			0x1b4
#define ENT_INT_SRC1			0x1b8
#define ENT_INT_SRC1_D2H_FIS_CH0_OFF	0
#define ENT_INT_SRC1_D2H_FIS_CH0_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH0_OFF)
#define ENT_INT_SRC1_D2H_FIS_CH1_OFF	8
#define ENT_INT_SRC1_D2H_FIS_CH1_MSK	(0x1 << ENT_INT_SRC1_D2H_FIS_CH1_OFF)
#define ENT_INT_SRC2			0x1bc
#define ENT_INT_SRC3			0x1c0
#define ENT_INT_SRC3_ITC_INT_OFF	15
#define ENT_INT_SRC3_ITC_INT_MSK	(0x1 << ENT_INT_SRC3_ITC_INT_OFF)
#define ENT_INT_SRC_MSK1		0x1c4
#define ENT_INT_SRC_MSK2		0x1c8
#define ENT_INT_SRC_MSK3		0x1cc
#define ENT_INT_SRC_MSK3_ENT95_MSK_OFF	31
/* NOTE(review): (0x1 << 31) shifts into the sign bit of int; the kernel
 * normally uses BIT(31) here — confirm against the upstream file. */
#define ENT_INT_SRC_MSK3_ENT95_MSK_MSK	(0x1 << ENT_INT_SRC_MSK3_ENT95_MSK_OFF)
#define SAS_ECC_INTR_MSK		0x1ec
#define HGC_ERR_STAT_EN			0x238
#define DLVRY_Q_0_BASE_ADDR_LO		0x260
#define DLVRY_Q_0_BASE_ADDR_HI		0x264
#define DLVRY_Q_0_DEPTH			0x268
#define DLVRY_Q_0_WR_PTR		0x26c
#define DLVRY_Q_0_RD_PTR		0x270
#define HYPER_STREAM_ID_EN_CFG		0xc80
#define OQ0_INT_SRC_MSK			0xc90
#define COMPL_Q_0_BASE_ADDR_LO		0x4e0
#define COMPL_Q_0_BASE_ADDR_HI		0x4e4
#define COMPL_Q_0_DEPTH			0x4e8
#define COMPL_Q_0_WR_PTR		0x4ec
#define COMPL_Q_0_RD_PTR		0x4f0
/* phy registers need init */
#define PORT_BASE			(0x2000)

#define PHY_CFG				(PORT_BASE + 0x0)
#define HARD_PHY_LINKRATE		(PORT_BASE + 0x4)
#define PHY_CFG_ENA_OFF			0
#define PHY_CFG_ENA_MSK			(0x1 << PHY_CFG_ENA_OFF)
#define PHY_CFG_DC_OPT_OFF		2
#define PHY_CFG_DC_OPT_MSK		(0x1 << PHY_CFG_DC_OPT_OFF)
#define PROG_PHY_LINK_RATE		(PORT_BASE + 0x8)
#define PROG_PHY_LINK_RATE_MAX_OFF	0
#define PROG_PHY_LINK_RATE_MAX_MSK	(0xff << PROG_PHY_LINK_RATE_MAX_OFF)
#define PHY_CTRL			(PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF		0
#define PHY_CTRL_RESET_MSK		(0x1 << PHY_CTRL_RESET_OFF)
#define SAS_PHY_CTRL			(PORT_BASE + 0x20)
#define SL_CFG				(PORT_BASE + 0x84)
#define PHY_PCN				(PORT_BASE + 0x44)
#define SL_TOUT_CFG			(PORT_BASE + 0x8c)
#define SL_CONTROL			(PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF	0
#define SL_CONTROL_NOTIFY_EN_MSK	(0x1 << SL_CONTROL_NOTIFY_EN_OFF)
#define TX_ID_DWORD0			(PORT_BASE + 0x9c)
#define TX_ID_DWORD1			(PORT_BASE + 0xa0)
#define TX_ID_DWORD2			(PORT_BASE + 0xa4)
#define TX_ID_DWORD3			(PORT_BASE + 0xa8)
#define TX_ID_DWORD4			(PORT_BASE + 0xac)	/* was 0xaC; value unchanged */
#define TX_ID_DWORD5			(PORT_BASE + 0xb0)
#define TX_ID_DWORD6			(PORT_BASE + 0xb4)
#define RX_IDAF_DWORD0			(PORT_BASE + 0xc4)
#define RX_IDAF_DWORD1			(PORT_BASE + 0xc8)
#define RX_IDAF_DWORD2			(PORT_BASE + 0xcc)
#define RX_IDAF_DWORD3			(PORT_BASE + 0xd0)
#define RX_IDAF_DWORD4			(PORT_BASE + 0xd4)
#define RX_IDAF_DWORD5			(PORT_BASE + 0xd8)
#define RX_IDAF_DWORD6			(PORT_BASE + 0xdc)
#define RXOP_CHECK_CFG_H		(PORT_BASE + 0xfc)
#define DONE_RECEIVED_TIME		(PORT_BASE + 0x11c)
#define CHL_INT0			(PORT_BASE + 0x1b4)
#define CHL_INT0_HOTPLUG_TOUT_OFF	0
#define CHL_INT0_HOTPLUG_TOUT_MSK	(0x1 << CHL_INT0_HOTPLUG_TOUT_OFF)
#define CHL_INT0_SL_RX_BCST_ACK_OFF	1
#define CHL_INT0_SL_RX_BCST_ACK_MSK	(0x1 << CHL_INT0_SL_RX_BCST_ACK_OFF)
#define CHL_INT0_SL_PHY_ENABLE_OFF	2
#define CHL_INT0_SL_PHY_ENABLE_MSK	(0x1 << CHL_INT0_SL_PHY_ENABLE_OFF)
#define CHL_INT0_NOT_RDY_OFF		4
#define CHL_INT0_NOT_RDY_MSK		(0x1 << CHL_INT0_NOT_RDY_OFF)
#define CHL_INT0_PHY_RDY_OFF		5
#define CHL_INT0_PHY_RDY_MSK		(0x1 << CHL_INT0_PHY_RDY_OFF)
#define CHL_INT1			(PORT_BASE + 0x1b8)
#define CHL_INT1_DMAC_TX_ECC_ERR_OFF	15
#define CHL_INT1_DMAC_TX_ECC_ERR_MSK	(0x1 << CHL_INT1_DMAC_TX_ECC_ERR_OFF)
#define CHL_INT1_DMAC_RX_ECC_ERR_OFF	17
#define CHL_INT1_DMAC_RX_ECC_ERR_MSK	(0x1 << CHL_INT1_DMAC_RX_ECC_ERR_OFF)
#define CHL_INT2			(PORT_BASE + 0x1bc)
#define CHL_INT0_MSK			(PORT_BASE + 0x1c0)
#define CHL_INT1_MSK			(PORT_BASE + 0x1c4)
#define CHL_INT2_MSK			(PORT_BASE + 0x1c8)
#define CHL_INT_COAL_EN			(PORT_BASE + 0x1d0)
#define PHY_CTRL_RDY_MSK		(PORT_BASE + 0x2b0)
#define PHYCTRL_NOT_RDY_MSK		(PORT_BASE + 0x2b4)
#define PHYCTRL_DWS_RESET_MSK		(PORT_BASE + 0x2b8)
#define PHYCTRL_PHY_ENA_MSK		(PORT_BASE + 0x2bc)
#define SL_RX_BCAST_CHK_MSK		(PORT_BASE + 0x2c0)
#define PHYCTRL_OOB_RESTART_MSK		(PORT_BASE + 0x2c4)
#define DMA_TX_STATUS			(PORT_BASE + 0x2d0)
#define DMA_TX_STATUS_BUSY_OFF		0
#define DMA_TX_STATUS_BUSY_MSK		(0x1 << DMA_TX_STATUS_BUSY_OFF)
#define DMA_RX_STATUS			(PORT_BASE + 0x2e8)
#define DMA_RX_STATUS_BUSY_OFF		0
#define DMA_RX_STATUS_BUSY_MSK		(0x1 << DMA_RX_STATUS_BUSY_OFF)
/* AXI bus / arbitration-module configuration registers. */
#define AXI_CFG				(0x5100)
#define AM_CFG_MAX_TRANS		(0x5010)
#define AM_CFG_SINGLE_PORT_MAX_TRANS	(0x5014)
/* HW dma structures */
/* Delivery queue header */
#define CMD_HDR_RESP_REPORT_OFF		5
#define CMD_HDR_RESP_REPORT_MSK		(0x1 << CMD_HDR_RESP_REPORT_OFF)
#define CMD_HDR_TLR_CTRL_OFF		6
#define CMD_HDR_TLR_CTRL_MSK		(0x3 << CMD_HDR_TLR_CTRL_OFF)
#define CMD_HDR_PORT_OFF		18
#define CMD_HDR_PORT_MSK		(0xf << CMD_HDR_PORT_OFF)
#define CMD_HDR_PRIORITY_OFF		27
#define CMD_HDR_PRIORITY_MSK		(0x1 << CMD_HDR_PRIORITY_OFF)
#define CMD_HDR_CMD_OFF			29
#define CMD_HDR_CMD_MSK			(0x7 << CMD_HDR_CMD_OFF)
#define CMD_HDR_DIR_OFF			5
#define CMD_HDR_DIR_MSK			(0x3 << CMD_HDR_DIR_OFF)
#define CMD_HDR_RESET_OFF		7
#define CMD_HDR_RESET_MSK		(0x1 << CMD_HDR_RESET_OFF)
#define CMD_HDR_VDTL_OFF		10
#define CMD_HDR_VDTL_MSK		(0x1 << CMD_HDR_VDTL_OFF)
#define CMD_HDR_FRAME_TYPE_OFF		11
#define CMD_HDR_FRAME_TYPE_MSK		(0x1f << CMD_HDR_FRAME_TYPE_OFF)
#define CMD_HDR_DEV_ID_OFF		16
#define CMD_HDR_DEV_ID_MSK		(0xffff << CMD_HDR_DEV_ID_OFF)
#define CMD_HDR_CFL_OFF			0
#define CMD_HDR_CFL_MSK			(0x1ff << CMD_HDR_CFL_OFF)
#define CMD_HDR_NCQ_TAG_OFF		10
#define CMD_HDR_NCQ_TAG_MSK		(0x1f << CMD_HDR_NCQ_TAG_OFF)
#define CMD_HDR_MRFL_OFF		15
#define CMD_HDR_MRFL_MSK		(0x1ff << CMD_HDR_MRFL_OFF)
#define CMD_HDR_SG_MOD_OFF		24
#define CMD_HDR_SG_MOD_MSK		(0x3 << CMD_HDR_SG_MOD_OFF)
#define CMD_HDR_FIRST_BURST_OFF		26
/*
 * BUG FIX: the mask was previously built from CMD_HDR_SG_MOD_OFF (24),
 * a copy/paste error, so it selected the SG_MOD bit instead of the
 * FIRST_BURST bit (26). Build it from FIRST_BURST's own offset.
 */
#define CMD_HDR_FIRST_BURST_MSK		(0x1 << CMD_HDR_FIRST_BURST_OFF)
#define CMD_HDR_IPTT_OFF		0
#define CMD_HDR_IPTT_MSK		(0xffff << CMD_HDR_IPTT_OFF)
#define CMD_HDR_DIF_SGL_LEN_OFF		0
#define CMD_HDR_DIF_SGL_LEN_MSK		(0xffff << CMD_HDR_DIF_SGL_LEN_OFF)
#define CMD_HDR_DATA_SGL_LEN_OFF	16
#define CMD_HDR_DATA_SGL_LEN_MSK	(0xffff << CMD_HDR_DATA_SGL_LEN_OFF)
/* Completion header */
#define CMPLT_HDR_RSPNS_XFRD_OFF	10
#define CMPLT_HDR_RSPNS_XFRD_MSK	(0x1 << CMPLT_HDR_RSPNS_XFRD_OFF)
#define CMPLT_HDR_ERX_OFF		12
#define CMPLT_HDR_ERX_MSK		(0x1 << CMPLT_HDR_ERX_OFF)
#define CMPLT_HDR_IPTT_OFF		0
#define CMPLT_HDR_IPTT_MSK		(0xffff << CMPLT_HDR_IPTT_OFF)
#define CMPLT_HDR_DEV_ID_OFF		16
#define CMPLT_HDR_DEV_ID_MSK		(0xffff << CMPLT_HDR_DEV_ID_OFF)

/* ITCT header fields (ITCT qw0 bit layout) */
#define ITCT_HDR_DEV_TYPE_OFF		0
#define ITCT_HDR_DEV_TYPE_MSK		(0x3 << ITCT_HDR_DEV_TYPE_OFF)
#define ITCT_HDR_VALID_OFF		2
#define ITCT_HDR_VALID_MSK		(0x1 << ITCT_HDR_VALID_OFF)
#define ITCT_HDR_MCR_OFF		5
#define ITCT_HDR_MCR_MSK		(0xf << ITCT_HDR_MCR_OFF)
#define ITCT_HDR_VLN_OFF		9
#define ITCT_HDR_VLN_MSK		(0xf << ITCT_HDR_VLN_OFF)
#define ITCT_HDR_PORT_ID_OFF		28
#define ITCT_HDR_PORT_ID_MSK		(0xf << ITCT_HDR_PORT_ID_OFF)
/* ITCT timer fields: 64-bit (ULL) masks, used with qw2 */
#define ITCT_HDR_INLT_OFF		0
#define ITCT_HDR_INLT_MSK		(0xffffULL << ITCT_HDR_INLT_OFF)
#define ITCT_HDR_BITLT_OFF		16
#define ITCT_HDR_BITLT_MSK		(0xffffULL << ITCT_HDR_BITLT_OFF)
#define ITCT_HDR_MCTLT_OFF		32
#define ITCT_HDR_MCTLT_MSK		(0xffffULL << ITCT_HDR_MCTLT_OFF)
#define ITCT_HDR_RTOLT_OFF		48
#define ITCT_HDR_RTOLT_MSK		(0xffffULL << ITCT_HDR_RTOLT_OFF)
252 struct hisi_sas_complete_v2_hdr
{
259 struct hisi_sas_err_record_v2
{
261 __le32 trans_tx_fail_type
;
264 __le32 trans_rx_fail_type
;
267 __le16 dma_tx_err_type
;
268 __le16 sipc_rx_err_type
;
271 __le32 dma_rx_err_type
;
275 HISI_SAS_PHY_PHY_UPDOWN
,
276 HISI_SAS_PHY_CHNL_INT
,
281 TRANS_TX_FAIL_BASE
= 0x0, /* dw0 */
282 TRANS_RX_FAIL_BASE
= 0x100, /* dw1 */
283 DMA_TX_ERR_BASE
= 0x200, /* dw2 bit 15-0 */
284 SIPC_RX_ERR_BASE
= 0x300, /* dw2 bit 31-16*/
285 DMA_RX_ERR_BASE
= 0x400, /* dw3 */
288 TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS
= TRANS_TX_FAIL_BASE
, /* 0x0 */
289 TRANS_TX_ERR_PHY_NOT_ENABLE
, /* 0x1 */
290 TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION
, /* 0x2 */
291 TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION
, /* 0x3 */
292 TRANS_TX_OPEN_CNX_ERR_BY_OTHER
, /* 0x4 */
294 TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT
, /* 0x6 */
295 TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY
, /* 0x7 */
296 TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED
, /* 0x8 */
297 TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED
, /* 0x9 */
298 TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION
, /* 0xa */
299 TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD
, /* 0xb */
300 TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER
, /* 0xc */
301 TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED
, /* 0xd */
302 TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT
, /* 0xe */
303 TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION
, /* 0xf */
304 TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED
, /* 0x10 */
305 TRANS_TX_ERR_FRAME_TXED
, /* 0x11 */
306 TRANS_TX_ERR_WITH_BREAK_TIMEOUT
, /* 0x12 */
307 TRANS_TX_ERR_WITH_BREAK_REQUEST
, /* 0x13 */
308 TRANS_TX_ERR_WITH_BREAK_RECEVIED
, /* 0x14 */
309 TRANS_TX_ERR_WITH_CLOSE_TIMEOUT
, /* 0x15 */
310 TRANS_TX_ERR_WITH_CLOSE_NORMAL
, /* 0x16 for ssp*/
311 TRANS_TX_ERR_WITH_CLOSE_PHYDISALE
, /* 0x17 */
312 TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT
, /* 0x18 */
313 TRANS_TX_ERR_WITH_CLOSE_COMINIT
, /* 0x19 */
314 TRANS_TX_ERR_WITH_NAK_RECEVIED
, /* 0x1a for ssp*/
315 TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT
, /* 0x1b for ssp*/
316 /*IO_TX_ERR_WITH_R_ERR_RECEVIED, [> 0x1b for sata/stp<] */
317 TRANS_TX_ERR_WITH_CREDIT_TIMEOUT
, /* 0x1c for ssp */
318 /*IO_RX_ERR_WITH_SATA_DEVICE_LOST 0x1c for sata/stp */
319 TRANS_TX_ERR_WITH_IPTT_CONFLICT
, /* 0x1d for ssp/smp */
320 TRANS_TX_ERR_WITH_OPEN_BY_DES_OR_OTHERS
, /* 0x1e */
321 /*IO_TX_ERR_WITH_SYNC_RXD, [> 0x1e <] for sata/stp */
322 TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT
, /* 0x1f for sata/stp */
325 TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR
= TRANS_RX_FAIL_BASE
, /* 0x100 */
326 TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR
, /* 0x101 for sata/stp */
327 TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM
, /* 0x102 for ssp/smp */
328 /*IO_ERR_WITH_RXFIS_8B10B_CODE_ERR, [> 0x102 <] for sata/stp */
329 TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR
, /* 0x103 for sata/stp */
330 TRANS_RX_ERR_WITH_RXFIS_CRC_ERR
, /* 0x104 for sata/stp */
331 TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN
, /* 0x105 for smp */
332 /*IO_ERR_WITH_RXFIS_TX SYNCP, [> 0x105 <] for sata/stp */
333 TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP
, /* 0x106 for sata/stp*/
334 TRANS_RX_ERR_WITH_LINK_BUF_OVERRUN
, /* 0x107 */
335 TRANS_RX_ERR_WITH_BREAK_TIMEOUT
, /* 0x108 */
336 TRANS_RX_ERR_WITH_BREAK_REQUEST
, /* 0x109 */
337 TRANS_RX_ERR_WITH_BREAK_RECEVIED
, /* 0x10a */
338 RESERVED1
, /* 0x10b */
339 TRANS_RX_ERR_WITH_CLOSE_NORMAL
, /* 0x10c */
340 TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE
, /* 0x10d */
341 TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT
, /* 0x10e */
342 TRANS_RX_ERR_WITH_CLOSE_COMINIT
, /* 0x10f */
343 TRANS_RX_ERR_WITH_DATA_LEN0
, /* 0x110 for ssp/smp */
344 TRANS_RX_ERR_WITH_BAD_HASH
, /* 0x111 for ssp */
345 /*IO_RX_ERR_WITH_FIS_TOO_SHORT, [> 0x111 <] for sata/stp */
346 TRANS_RX_XRDY_WLEN_ZERO_ERR
, /* 0x112 for ssp*/
347 /*IO_RX_ERR_WITH_FIS_TOO_LONG, [> 0x112 <] for sata/stp */
348 TRANS_RX_SSP_FRM_LEN_ERR
, /* 0x113 for ssp */
349 /*IO_RX_ERR_WITH_SATA_DEVICE_LOST, [> 0x113 <] for sata */
350 RESERVED2
, /* 0x114 */
351 RESERVED3
, /* 0x115 */
352 RESERVED4
, /* 0x116 */
353 RESERVED5
, /* 0x117 */
354 TRANS_RX_ERR_WITH_BAD_FRM_TYPE
, /* 0x118 */
355 TRANS_RX_SMP_FRM_LEN_ERR
, /* 0x119 */
356 TRANS_RX_SMP_RESP_TIMEOUT_ERR
, /* 0x11a */
357 RESERVED6
, /* 0x11b */
358 RESERVED7
, /* 0x11c */
359 RESERVED8
, /* 0x11d */
360 RESERVED9
, /* 0x11e */
361 TRANS_RX_R_ERR
, /* 0x11f */
364 DMA_TX_DIF_CRC_ERR
= DMA_TX_ERR_BASE
, /* 0x200 */
365 DMA_TX_DIF_APP_ERR
, /* 0x201 */
366 DMA_TX_DIF_RPP_ERR
, /* 0x202 */
367 DMA_TX_DATA_SGL_OVERFLOW
, /* 0x203 */
368 DMA_TX_DIF_SGL_OVERFLOW
, /* 0x204 */
369 DMA_TX_UNEXP_XFER_ERR
, /* 0x205 */
370 DMA_TX_UNEXP_RETRANS_ERR
, /* 0x206 */
371 DMA_TX_XFER_LEN_OVERFLOW
, /* 0x207 */
372 DMA_TX_XFER_OFFSET_ERR
, /* 0x208 */
373 DMA_TX_RAM_ECC_ERR
, /* 0x209 */
374 DMA_TX_DIF_LEN_ALIGN_ERR
, /* 0x20a */
377 SIPC_RX_FIS_STATUS_ERR_BIT_VLD
= SIPC_RX_ERR_BASE
, /* 0x300 */
378 SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR
, /* 0x301 */
379 SIPC_RX_FIS_STATUS_BSY_BIT_ERR
, /* 0x302 */
380 SIPC_RX_WRSETUP_LEN_ODD_ERR
, /* 0x303 */
381 SIPC_RX_WRSETUP_LEN_ZERO_ERR
, /* 0x304 */
382 SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR
, /* 0x305 */
383 SIPC_RX_NCQ_WRSETUP_OFFSET_ERR
, /* 0x306 */
384 SIPC_RX_NCQ_WRSETUP_AUTO_ACTIVE_ERR
, /* 0x307 */
385 SIPC_RX_SATA_UNEXP_FIS_ERR
, /* 0x308 */
386 SIPC_RX_WRSETUP_ESTATUS_ERR
, /* 0x309 */
387 SIPC_RX_DATA_UNDERFLOW_ERR
, /* 0x30a */
390 DMA_RX_DIF_CRC_ERR
= DMA_RX_ERR_BASE
, /* 0x400 */
391 DMA_RX_DIF_APP_ERR
, /* 0x401 */
392 DMA_RX_DIF_RPP_ERR
, /* 0x402 */
393 DMA_RX_DATA_SGL_OVERFLOW
, /* 0x403 */
394 DMA_RX_DIF_SGL_OVERFLOW
, /* 0x404 */
395 DMA_RX_DATA_LEN_OVERFLOW
, /* 0x405 */
396 DMA_RX_DATA_LEN_UNDERFLOW
, /* 0x406 */
397 DMA_RX_DATA_OFFSET_ERR
, /* 0x407 */
398 RESERVED10
, /* 0x408 */
399 DMA_RX_SATA_FRAME_TYPE_ERR
, /* 0x409 */
400 DMA_RX_RESP_BUF_OVERFLOW
, /* 0x40a */
401 DMA_RX_UNEXP_RETRANS_RESP_ERR
, /* 0x40b */
402 DMA_RX_UNEXP_NORM_RESP_ERR
, /* 0x40c */
403 DMA_RX_UNEXP_RDFRAME_ERR
, /* 0x40d */
404 DMA_RX_PIO_DATA_LEN_ERR
, /* 0x40e */
405 DMA_RX_RDSETUP_STATUS_ERR
, /* 0x40f */
406 DMA_RX_RDSETUP_STATUS_DRQ_ERR
, /* 0x410 */
407 DMA_RX_RDSETUP_STATUS_BSY_ERR
, /* 0x411 */
408 DMA_RX_RDSETUP_LEN_ODD_ERR
, /* 0x412 */
409 DMA_RX_RDSETUP_LEN_ZERO_ERR
, /* 0x413 */
410 DMA_RX_RDSETUP_LEN_OVER_ERR
, /* 0x414 */
411 DMA_RX_RDSETUP_OFFSET_ERR
, /* 0x415 */
412 DMA_RX_RDSETUP_ACTIVE_ERR
, /* 0x416 */
413 DMA_RX_RDSETUP_ESTATUS_ERR
, /* 0x417 */
414 DMA_RX_RAM_ECC_ERR
, /* 0x418 */
415 DMA_RX_UNKNOWN_FRM_ERR
, /* 0x419 */
/* Number of command slots supported by the v2 hardware. */
#define HISI_SAS_COMMAND_ENTRIES_V2_HW	4096

/* Data-direction encodings for the command header DIR field. */
#define DIR_NO_DATA			0
/* NOTE(review): a DIR_TO_HOST definition (value 1) is not visible in this
 * chunk — confirm against the full file before relying on the gap. */
#define DIR_TO_DEVICE			2
#define DIR_RESERVED			3

/* SATA frame protocol selectors (bit flags). */
#define SATA_PROTOCOL_NONDATA		0x1
#define SATA_PROTOCOL_PIO		0x2
#define SATA_PROTOCOL_DMA		0x4
#define SATA_PROTOCOL_FPDMA		0x8
#define SATA_PROTOCOL_ATAPI		0x10
431 static u32
hisi_sas_read32(struct hisi_hba
*hisi_hba
, u32 off
)
433 void __iomem
*regs
= hisi_hba
->regs
+ off
;
438 static u32
hisi_sas_read32_relaxed(struct hisi_hba
*hisi_hba
, u32 off
)
440 void __iomem
*regs
= hisi_hba
->regs
+ off
;
442 return readl_relaxed(regs
);
445 static void hisi_sas_write32(struct hisi_hba
*hisi_hba
, u32 off
, u32 val
)
447 void __iomem
*regs
= hisi_hba
->regs
+ off
;
452 static void hisi_sas_phy_write32(struct hisi_hba
*hisi_hba
, int phy_no
,
455 void __iomem
*regs
= hisi_hba
->regs
+ (0x400 * phy_no
) + off
;
460 static u32
hisi_sas_phy_read32(struct hisi_hba
*hisi_hba
,
463 void __iomem
*regs
= hisi_hba
->regs
+ (0x400 * phy_no
) + off
;
468 /* This function needs to be protected from pre-emption. */
470 slot_index_alloc_quirk_v2_hw(struct hisi_hba
*hisi_hba
, int *slot_idx
,
471 struct domain_device
*device
)
473 unsigned int index
= 0;
474 void *bitmap
= hisi_hba
->slot_index_tags
;
475 int sata_dev
= dev_is_sata(device
);
478 index
= find_next_zero_bit(bitmap
, hisi_hba
->slot_index_count
,
480 if (index
>= hisi_hba
->slot_index_count
)
481 return -SAS_QUEUE_FULL
;
483 * SAS IPTT bit0 should be 1
485 if (sata_dev
|| (index
& 1))
490 set_bit(index
, bitmap
);
495 static void config_phy_opt_mode_v2_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
497 u32 cfg
= hisi_sas_phy_read32(hisi_hba
, phy_no
, PHY_CFG
);
499 cfg
&= ~PHY_CFG_DC_OPT_MSK
;
500 cfg
|= 1 << PHY_CFG_DC_OPT_OFF
;
501 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHY_CFG
, cfg
);
504 static void config_id_frame_v2_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
506 struct sas_identify_frame identify_frame
;
507 u32
*identify_buffer
;
509 memset(&identify_frame
, 0, sizeof(identify_frame
));
510 identify_frame
.dev_type
= SAS_END_DEVICE
;
511 identify_frame
.frame_type
= 0;
512 identify_frame
._un1
= 1;
513 identify_frame
.initiator_bits
= SAS_PROTOCOL_ALL
;
514 identify_frame
.target_bits
= SAS_PROTOCOL_NONE
;
515 memcpy(&identify_frame
._un4_11
[0], hisi_hba
->sas_addr
, SAS_ADDR_SIZE
);
516 memcpy(&identify_frame
.sas_addr
[0], hisi_hba
->sas_addr
, SAS_ADDR_SIZE
);
517 identify_frame
.phy_id
= phy_no
;
518 identify_buffer
= (u32
*)(&identify_frame
);
520 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD0
,
521 __swab32(identify_buffer
[0]));
522 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD1
,
524 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD2
,
526 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD3
,
528 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD4
,
530 hisi_sas_phy_write32(hisi_hba
, phy_no
, TX_ID_DWORD5
,
531 __swab32(identify_buffer
[5]));
534 static void init_id_frame_v2_hw(struct hisi_hba
*hisi_hba
)
538 for (i
= 0; i
< hisi_hba
->n_phy
; i
++)
539 config_id_frame_v2_hw(hisi_hba
, i
);
542 static void setup_itct_v2_hw(struct hisi_hba
*hisi_hba
,
543 struct hisi_sas_device
*sas_dev
)
545 struct domain_device
*device
= sas_dev
->sas_device
;
546 struct device
*dev
= &hisi_hba
->pdev
->dev
;
547 u64 qw0
, device_id
= sas_dev
->device_id
;
548 struct hisi_sas_itct
*itct
= &hisi_hba
->itct
[device_id
];
549 struct domain_device
*parent_dev
= device
->parent
;
550 struct hisi_sas_port
*port
= device
->port
->lldd_port
;
552 memset(itct
, 0, sizeof(*itct
));
556 switch (sas_dev
->dev_type
) {
558 case SAS_EDGE_EXPANDER_DEVICE
:
559 case SAS_FANOUT_EXPANDER_DEVICE
:
560 qw0
= HISI_SAS_DEV_TYPE_SSP
<< ITCT_HDR_DEV_TYPE_OFF
;
563 if (parent_dev
&& DEV_IS_EXPANDER(parent_dev
->dev_type
))
564 qw0
= HISI_SAS_DEV_TYPE_STP
<< ITCT_HDR_DEV_TYPE_OFF
;
566 qw0
= HISI_SAS_DEV_TYPE_SATA
<< ITCT_HDR_DEV_TYPE_OFF
;
569 dev_warn(dev
, "setup itct: unsupported dev type (%d)\n",
573 qw0
|= ((1 << ITCT_HDR_VALID_OFF
) |
574 (device
->linkrate
<< ITCT_HDR_MCR_OFF
) |
575 (1 << ITCT_HDR_VLN_OFF
) |
576 (port
->id
<< ITCT_HDR_PORT_ID_OFF
));
577 itct
->qw0
= cpu_to_le64(qw0
);
580 memcpy(&itct
->sas_addr
, device
->sas_addr
, SAS_ADDR_SIZE
);
581 itct
->sas_addr
= __swab64(itct
->sas_addr
);
584 if (!dev_is_sata(device
))
585 itct
->qw2
= cpu_to_le64((500ULL << ITCT_HDR_INLT_OFF
) |
586 (0x1ULL
<< ITCT_HDR_BITLT_OFF
) |
587 (0x32ULL
<< ITCT_HDR_MCTLT_OFF
) |
588 (0x1ULL
<< ITCT_HDR_RTOLT_OFF
));
591 static void free_device_v2_hw(struct hisi_hba
*hisi_hba
,
592 struct hisi_sas_device
*sas_dev
)
594 u64 qw0
, dev_id
= sas_dev
->device_id
;
595 struct device
*dev
= &hisi_hba
->pdev
->dev
;
596 struct hisi_sas_itct
*itct
= &hisi_hba
->itct
[dev_id
];
597 u32 reg_val
= hisi_sas_read32(hisi_hba
, ENT_INT_SRC3
);
600 /* clear the itct interrupt state */
601 if (ENT_INT_SRC3_ITC_INT_MSK
& reg_val
)
602 hisi_sas_write32(hisi_hba
, ENT_INT_SRC3
,
603 ENT_INT_SRC3_ITC_INT_MSK
);
605 /* clear the itct int*/
606 for (i
= 0; i
< 2; i
++) {
607 /* clear the itct table*/
608 reg_val
= hisi_sas_read32(hisi_hba
, ITCT_CLR
);
609 reg_val
|= ITCT_CLR_EN_MSK
| (dev_id
& ITCT_DEV_MSK
);
610 hisi_sas_write32(hisi_hba
, ITCT_CLR
, reg_val
);
613 reg_val
= hisi_sas_read32(hisi_hba
, ENT_INT_SRC3
);
614 if (ENT_INT_SRC3_ITC_INT_MSK
& reg_val
) {
615 dev_dbg(dev
, "got clear ITCT done interrupt\n");
617 /* invalid the itct state*/
618 qw0
= cpu_to_le64(itct
->qw0
);
619 qw0
&= ~(1 << ITCT_HDR_VALID_OFF
);
620 hisi_sas_write32(hisi_hba
, ENT_INT_SRC3
,
621 ENT_INT_SRC3_ITC_INT_MSK
);
622 hisi_hba
->devices
[dev_id
].dev_type
= SAS_PHY_UNUSED
;
623 hisi_hba
->devices
[dev_id
].dev_status
= HISI_SAS_DEV_NORMAL
;
626 hisi_sas_write32(hisi_hba
, ITCT_CLR
, 0);
627 dev_dbg(dev
, "clear ITCT ok\n");
633 static int reset_hw_v2_hw(struct hisi_hba
*hisi_hba
)
637 unsigned long end_time
;
638 struct device
*dev
= &hisi_hba
->pdev
->dev
;
640 /* The mask needs to be set depending on the number of phys */
641 if (hisi_hba
->n_phy
== 9)
642 reset_val
= 0x1fffff;
646 /* Disable all of the DQ */
647 for (i
= 0; i
< HISI_SAS_MAX_QUEUES
; i
++)
648 hisi_sas_write32(hisi_hba
, DLVRY_QUEUE_ENABLE
, 0);
650 /* Disable all of the PHYs */
651 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
652 u32 phy_cfg
= hisi_sas_phy_read32(hisi_hba
, i
, PHY_CFG
);
654 phy_cfg
&= ~PHY_CTRL_RESET_MSK
;
655 hisi_sas_phy_write32(hisi_hba
, i
, PHY_CFG
, phy_cfg
);
659 /* Ensure DMA tx & rx idle */
660 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
661 u32 dma_tx_status
, dma_rx_status
;
663 end_time
= jiffies
+ msecs_to_jiffies(1000);
666 dma_tx_status
= hisi_sas_phy_read32(hisi_hba
, i
,
668 dma_rx_status
= hisi_sas_phy_read32(hisi_hba
, i
,
671 if (!(dma_tx_status
& DMA_TX_STATUS_BUSY_MSK
) &&
672 !(dma_rx_status
& DMA_RX_STATUS_BUSY_MSK
))
676 if (time_after(jiffies
, end_time
))
681 /* Ensure axi bus idle */
682 end_time
= jiffies
+ msecs_to_jiffies(1000);
685 hisi_sas_read32(hisi_hba
, AXI_CFG
);
691 if (time_after(jiffies
, end_time
))
695 /* reset and disable clock*/
696 regmap_write(hisi_hba
->ctrl
, hisi_hba
->ctrl_reset_reg
,
698 regmap_write(hisi_hba
->ctrl
, hisi_hba
->ctrl_clock_ena_reg
+ 4,
701 regmap_read(hisi_hba
->ctrl
, hisi_hba
->ctrl_reset_sts_reg
, &val
);
702 if (reset_val
!= (val
& reset_val
)) {
703 dev_err(dev
, "SAS reset fail.\n");
707 /* De-reset and enable clock*/
708 regmap_write(hisi_hba
->ctrl
, hisi_hba
->ctrl_reset_reg
+ 4,
710 regmap_write(hisi_hba
->ctrl
, hisi_hba
->ctrl_clock_ena_reg
,
713 regmap_read(hisi_hba
->ctrl
, hisi_hba
->ctrl_reset_sts_reg
,
715 if (val
& reset_val
) {
716 dev_err(dev
, "SAS de-reset fail.\n");
723 static void init_reg_v2_hw(struct hisi_hba
*hisi_hba
)
725 struct device
*dev
= &hisi_hba
->pdev
->dev
;
726 struct device_node
*np
= dev
->of_node
;
729 /* Global registers init */
731 /* Deal with am-max-transmissions quirk */
732 if (of_get_property(np
, "hip06-sas-v2-quirk-amt", NULL
)) {
733 hisi_sas_write32(hisi_hba
, AM_CFG_MAX_TRANS
, 0x2020);
734 hisi_sas_write32(hisi_hba
, AM_CFG_SINGLE_PORT_MAX_TRANS
,
736 } /* Else, use defaults -> do nothing */
738 hisi_sas_write32(hisi_hba
, DLVRY_QUEUE_ENABLE
,
739 (u32
)((1ULL << hisi_hba
->queue_count
) - 1));
740 hisi_sas_write32(hisi_hba
, AXI_USER1
, 0xc0000000);
741 hisi_sas_write32(hisi_hba
, AXI_USER2
, 0x10000);
742 hisi_sas_write32(hisi_hba
, HGC_SAS_TXFAIL_RETRY_CTRL
, 0x108);
743 hisi_sas_write32(hisi_hba
, HGC_SAS_TX_OPEN_FAIL_RETRY_CTRL
, 0x7FF);
744 hisi_sas_write32(hisi_hba
, OPENA_WT_CONTI_TIME
, 0x1);
745 hisi_sas_write32(hisi_hba
, I_T_NEXUS_LOSS_TIME
, 0x1F4);
746 hisi_sas_write32(hisi_hba
, MAX_CON_TIME_LIMIT_TIME
, 0x32);
747 hisi_sas_write32(hisi_hba
, BUS_INACTIVE_LIMIT_TIME
, 0x1);
748 hisi_sas_write32(hisi_hba
, CFG_AGING_TIME
, 0x1);
749 hisi_sas_write32(hisi_hba
, HGC_ERR_STAT_EN
, 0x1);
750 hisi_sas_write32(hisi_hba
, HGC_GET_ITV_TIME
, 0x1);
751 hisi_sas_write32(hisi_hba
, INT_COAL_EN
, 0x1);
752 hisi_sas_write32(hisi_hba
, OQ_INT_COAL_TIME
, 0x1);
753 hisi_sas_write32(hisi_hba
, OQ_INT_COAL_CNT
, 0x1);
754 hisi_sas_write32(hisi_hba
, ENT_INT_COAL_TIME
, 0x1);
755 hisi_sas_write32(hisi_hba
, ENT_INT_COAL_CNT
, 0x1);
756 hisi_sas_write32(hisi_hba
, OQ_INT_SRC
, 0x0);
757 hisi_sas_write32(hisi_hba
, ENT_INT_SRC1
, 0xffffffff);
758 hisi_sas_write32(hisi_hba
, ENT_INT_SRC2
, 0xffffffff);
759 hisi_sas_write32(hisi_hba
, ENT_INT_SRC3
, 0xffffffff);
760 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK1
, 0x7efefefe);
761 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK2
, 0x7efefefe);
762 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK3
, 0x7ffffffe);
763 hisi_sas_write32(hisi_hba
, SAS_ECC_INTR_MSK
, 0xfffff3c0);
764 for (i
= 0; i
< hisi_hba
->queue_count
; i
++)
765 hisi_sas_write32(hisi_hba
, OQ0_INT_SRC_MSK
+0x4*i
, 0);
767 hisi_sas_write32(hisi_hba
, AXI_AHB_CLK_CFG
, 1);
768 hisi_sas_write32(hisi_hba
, HYPER_STREAM_ID_EN_CFG
, 1);
770 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
771 hisi_sas_phy_write32(hisi_hba
, i
, PROG_PHY_LINK_RATE
, 0x855);
772 hisi_sas_phy_write32(hisi_hba
, i
, SAS_PHY_CTRL
, 0x30b9908);
773 hisi_sas_phy_write32(hisi_hba
, i
, SL_TOUT_CFG
, 0x7d7d7d7d);
774 hisi_sas_phy_write32(hisi_hba
, i
, DONE_RECEIVED_TIME
, 0x10);
775 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT0
, 0xffffffff);
776 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT1
, 0xffffffff);
777 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT2
, 0xffffffff);
778 hisi_sas_phy_write32(hisi_hba
, i
, RXOP_CHECK_CFG_H
, 0x1000);
779 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT1_MSK
, 0xffffffff);
780 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT2_MSK
, 0x8ffffbff);
781 hisi_sas_phy_write32(hisi_hba
, i
, SL_CFG
, 0x23f801fc);
782 hisi_sas_phy_write32(hisi_hba
, i
, PHY_CTRL_RDY_MSK
, 0x0);
783 hisi_sas_phy_write32(hisi_hba
, i
, PHYCTRL_NOT_RDY_MSK
, 0x0);
784 hisi_sas_phy_write32(hisi_hba
, i
, PHYCTRL_DWS_RESET_MSK
, 0x0);
785 hisi_sas_phy_write32(hisi_hba
, i
, PHYCTRL_PHY_ENA_MSK
, 0x0);
786 hisi_sas_phy_write32(hisi_hba
, i
, SL_RX_BCAST_CHK_MSK
, 0x0);
787 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT_COAL_EN
, 0x0);
788 hisi_sas_phy_write32(hisi_hba
, i
, PHYCTRL_OOB_RESTART_MSK
, 0x0);
789 hisi_sas_phy_write32(hisi_hba
, i
, PHY_CTRL
, 0x199B694);
792 for (i
= 0; i
< hisi_hba
->queue_count
; i
++) {
794 hisi_sas_write32(hisi_hba
,
795 DLVRY_Q_0_BASE_ADDR_HI
+ (i
* 0x14),
796 upper_32_bits(hisi_hba
->cmd_hdr_dma
[i
]));
798 hisi_sas_write32(hisi_hba
, DLVRY_Q_0_BASE_ADDR_LO
+ (i
* 0x14),
799 lower_32_bits(hisi_hba
->cmd_hdr_dma
[i
]));
801 hisi_sas_write32(hisi_hba
, DLVRY_Q_0_DEPTH
+ (i
* 0x14),
802 HISI_SAS_QUEUE_SLOTS
);
804 /* Completion queue */
805 hisi_sas_write32(hisi_hba
, COMPL_Q_0_BASE_ADDR_HI
+ (i
* 0x14),
806 upper_32_bits(hisi_hba
->complete_hdr_dma
[i
]));
808 hisi_sas_write32(hisi_hba
, COMPL_Q_0_BASE_ADDR_LO
+ (i
* 0x14),
809 lower_32_bits(hisi_hba
->complete_hdr_dma
[i
]));
811 hisi_sas_write32(hisi_hba
, COMPL_Q_0_DEPTH
+ (i
* 0x14),
812 HISI_SAS_QUEUE_SLOTS
);
816 hisi_sas_write32(hisi_hba
, ITCT_BASE_ADDR_LO
,
817 lower_32_bits(hisi_hba
->itct_dma
));
819 hisi_sas_write32(hisi_hba
, ITCT_BASE_ADDR_HI
,
820 upper_32_bits(hisi_hba
->itct_dma
));
823 hisi_sas_write32(hisi_hba
, IOST_BASE_ADDR_LO
,
824 lower_32_bits(hisi_hba
->iost_dma
));
826 hisi_sas_write32(hisi_hba
, IOST_BASE_ADDR_HI
,
827 upper_32_bits(hisi_hba
->iost_dma
));
830 hisi_sas_write32(hisi_hba
, IO_BROKEN_MSG_ADDR_LO
,
831 lower_32_bits(hisi_hba
->breakpoint_dma
));
833 hisi_sas_write32(hisi_hba
, IO_BROKEN_MSG_ADDR_HI
,
834 upper_32_bits(hisi_hba
->breakpoint_dma
));
836 /* SATA broken msg */
837 hisi_sas_write32(hisi_hba
, IO_SATA_BROKEN_MSG_ADDR_LO
,
838 lower_32_bits(hisi_hba
->sata_breakpoint_dma
));
840 hisi_sas_write32(hisi_hba
, IO_SATA_BROKEN_MSG_ADDR_HI
,
841 upper_32_bits(hisi_hba
->sata_breakpoint_dma
));
843 /* SATA initial fis */
844 hisi_sas_write32(hisi_hba
, SATA_INITI_D2H_STORE_ADDR_LO
,
845 lower_32_bits(hisi_hba
->initial_fis_dma
));
847 hisi_sas_write32(hisi_hba
, SATA_INITI_D2H_STORE_ADDR_HI
,
848 upper_32_bits(hisi_hba
->initial_fis_dma
));
851 static int hw_init_v2_hw(struct hisi_hba
*hisi_hba
)
853 struct device
*dev
= &hisi_hba
->pdev
->dev
;
856 rc
= reset_hw_v2_hw(hisi_hba
);
858 dev_err(dev
, "hisi_sas_reset_hw failed, rc=%d", rc
);
863 init_reg_v2_hw(hisi_hba
);
865 init_id_frame_v2_hw(hisi_hba
);
870 static void enable_phy_v2_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
872 u32 cfg
= hisi_sas_phy_read32(hisi_hba
, phy_no
, PHY_CFG
);
874 cfg
|= PHY_CFG_ENA_MSK
;
875 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHY_CFG
, cfg
);
878 static void disable_phy_v2_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
880 u32 cfg
= hisi_sas_phy_read32(hisi_hba
, phy_no
, PHY_CFG
);
882 cfg
&= ~PHY_CFG_ENA_MSK
;
883 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHY_CFG
, cfg
);
886 static void start_phy_v2_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
888 config_id_frame_v2_hw(hisi_hba
, phy_no
);
889 config_phy_opt_mode_v2_hw(hisi_hba
, phy_no
);
890 enable_phy_v2_hw(hisi_hba
, phy_no
);
893 static void stop_phy_v2_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
895 disable_phy_v2_hw(hisi_hba
, phy_no
);
898 static void phy_hard_reset_v2_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
900 stop_phy_v2_hw(hisi_hba
, phy_no
);
902 start_phy_v2_hw(hisi_hba
, phy_no
);
905 static void start_phys_v2_hw(unsigned long data
)
907 struct hisi_hba
*hisi_hba
= (struct hisi_hba
*)data
;
910 for (i
= 0; i
< hisi_hba
->n_phy
; i
++)
911 start_phy_v2_hw(hisi_hba
, i
);
914 static void phys_init_v2_hw(struct hisi_hba
*hisi_hba
)
917 struct timer_list
*timer
= &hisi_hba
->timer
;
919 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
920 hisi_sas_phy_write32(hisi_hba
, i
, CHL_INT2_MSK
, 0x6a);
921 hisi_sas_phy_read32(hisi_hba
, i
, CHL_INT2_MSK
);
924 setup_timer(timer
, start_phys_v2_hw
, (unsigned long)hisi_hba
);
925 mod_timer(timer
, jiffies
+ HZ
);
928 static void sl_notify_v2_hw(struct hisi_hba
*hisi_hba
, int phy_no
)
932 sl_control
= hisi_sas_phy_read32(hisi_hba
, phy_no
, SL_CONTROL
);
933 sl_control
|= SL_CONTROL_NOTIFY_EN_MSK
;
934 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_CONTROL
, sl_control
);
936 sl_control
= hisi_sas_phy_read32(hisi_hba
, phy_no
, SL_CONTROL
);
937 sl_control
&= ~SL_CONTROL_NOTIFY_EN_MSK
;
938 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_CONTROL
, sl_control
);
941 static int get_wideport_bitmap_v2_hw(struct hisi_hba
*hisi_hba
, int port_id
)
944 u32 phy_port_num_ma
= hisi_sas_read32(hisi_hba
, PHY_PORT_NUM_MA
);
945 u32 phy_state
= hisi_sas_read32(hisi_hba
, PHY_STATE
);
947 for (i
= 0; i
< (hisi_hba
->n_phy
< 9 ? hisi_hba
->n_phy
: 8); i
++)
948 if (phy_state
& 1 << i
)
949 if (((phy_port_num_ma
>> (i
* 4)) & 0xf) == port_id
)
952 if (hisi_hba
->n_phy
== 9) {
953 u32 port_state
= hisi_sas_read32(hisi_hba
, PORT_STATE
);
955 if (phy_state
& 1 << 8)
956 if (((port_state
& PORT_STATE_PHY8_PORT_NUM_MSK
) >>
957 PORT_STATE_PHY8_PORT_NUM_OFF
) == port_id
)
965 * This function allocates across all queues to load balance.
966 * Slots are allocated from queues in a round-robin fashion.
968 * The callpath to this function and upto writing the write
969 * queue pointer should be safe from interruption.
971 static int get_free_slot_v2_hw(struct hisi_hba
*hisi_hba
, int *q
, int *s
)
973 struct device
*dev
= &hisi_hba
->pdev
->dev
;
975 int queue
= hisi_hba
->queue
;
978 w
= hisi_sas_read32_relaxed(hisi_hba
,
979 DLVRY_Q_0_WR_PTR
+ (queue
* 0x14));
980 r
= hisi_sas_read32_relaxed(hisi_hba
,
981 DLVRY_Q_0_RD_PTR
+ (queue
* 0x14));
982 if (r
== (w
+1) % HISI_SAS_QUEUE_SLOTS
) {
983 queue
= (queue
+ 1) % hisi_hba
->queue_count
;
984 if (queue
== hisi_hba
->queue
) {
985 dev_warn(dev
, "could not find free slot\n");
992 hisi_hba
->queue
= (queue
+ 1) % hisi_hba
->queue_count
;
998 static void start_delivery_v2_hw(struct hisi_hba
*hisi_hba
)
1000 int dlvry_queue
= hisi_hba
->slot_prep
->dlvry_queue
;
1001 int dlvry_queue_slot
= hisi_hba
->slot_prep
->dlvry_queue_slot
;
1003 hisi_sas_write32(hisi_hba
, DLVRY_Q_0_WR_PTR
+ (dlvry_queue
* 0x14),
1004 ++dlvry_queue_slot
% HISI_SAS_QUEUE_SLOTS
);
1007 static int prep_prd_sge_v2_hw(struct hisi_hba
*hisi_hba
,
1008 struct hisi_sas_slot
*slot
,
1009 struct hisi_sas_cmd_hdr
*hdr
,
1010 struct scatterlist
*scatter
,
1013 struct device
*dev
= &hisi_hba
->pdev
->dev
;
1014 struct scatterlist
*sg
;
1017 if (n_elem
> HISI_SAS_SGE_PAGE_CNT
) {
1018 dev_err(dev
, "prd err: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
1023 slot
->sge_page
= dma_pool_alloc(hisi_hba
->sge_page_pool
, GFP_ATOMIC
,
1024 &slot
->sge_page_dma
);
1025 if (!slot
->sge_page
)
1028 for_each_sg(scatter
, sg
, n_elem
, i
) {
1029 struct hisi_sas_sge
*entry
= &slot
->sge_page
->sge
[i
];
1031 entry
->addr
= cpu_to_le64(sg_dma_address(sg
));
1032 entry
->page_ctrl_0
= entry
->page_ctrl_1
= 0;
1033 entry
->data_len
= cpu_to_le32(sg_dma_len(sg
));
1034 entry
->data_off
= 0;
1037 hdr
->prd_table_addr
= cpu_to_le64(slot
->sge_page_dma
);
1039 hdr
->sg_len
= cpu_to_le32(n_elem
<< CMD_HDR_DATA_SGL_LEN_OFF
);
1044 static int prep_smp_v2_hw(struct hisi_hba
*hisi_hba
,
1045 struct hisi_sas_slot
*slot
)
1047 struct sas_task
*task
= slot
->task
;
1048 struct hisi_sas_cmd_hdr
*hdr
= slot
->cmd_hdr
;
1049 struct domain_device
*device
= task
->dev
;
1050 struct device
*dev
= &hisi_hba
->pdev
->dev
;
1051 struct hisi_sas_port
*port
= slot
->port
;
1052 struct scatterlist
*sg_req
, *sg_resp
;
1053 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1054 dma_addr_t req_dma_addr
;
1055 unsigned int req_len
, resp_len
;
1059 * DMA-map SMP request, response buffers
1062 sg_req
= &task
->smp_task
.smp_req
;
1063 elem
= dma_map_sg(dev
, sg_req
, 1, DMA_TO_DEVICE
);
1066 req_len
= sg_dma_len(sg_req
);
1067 req_dma_addr
= sg_dma_address(sg_req
);
1070 sg_resp
= &task
->smp_task
.smp_resp
;
1071 elem
= dma_map_sg(dev
, sg_resp
, 1, DMA_FROM_DEVICE
);
1076 resp_len
= sg_dma_len(sg_resp
);
1077 if ((req_len
& 0x3) || (resp_len
& 0x3)) {
1084 hdr
->dw0
= cpu_to_le32((port
->id
<< CMD_HDR_PORT_OFF
) |
1085 (1 << CMD_HDR_PRIORITY_OFF
) | /* high pri */
1086 (2 << CMD_HDR_CMD_OFF
)); /* smp */
1088 /* map itct entry */
1089 hdr
->dw1
= cpu_to_le32((sas_dev
->device_id
<< CMD_HDR_DEV_ID_OFF
) |
1090 (1 << CMD_HDR_FRAME_TYPE_OFF
) |
1091 (DIR_NO_DATA
<< CMD_HDR_DIR_OFF
));
1094 hdr
->dw2
= cpu_to_le32((((req_len
- 4) / 4) << CMD_HDR_CFL_OFF
) |
1095 (HISI_SAS_MAX_SMP_RESP_SZ
/ 4 <<
1098 hdr
->transfer_tags
= cpu_to_le32(slot
->idx
<< CMD_HDR_IPTT_OFF
);
1100 hdr
->cmd_table_addr
= cpu_to_le64(req_dma_addr
);
1101 hdr
->sts_buffer_addr
= cpu_to_le64(slot
->status_buffer_dma
);
1106 dma_unmap_sg(dev
, &slot
->task
->smp_task
.smp_resp
, 1,
1109 dma_unmap_sg(dev
, &slot
->task
->smp_task
.smp_req
, 1,
1114 static int prep_ssp_v2_hw(struct hisi_hba
*hisi_hba
,
1115 struct hisi_sas_slot
*slot
, int is_tmf
,
1116 struct hisi_sas_tmf_task
*tmf
)
1118 struct sas_task
*task
= slot
->task
;
1119 struct hisi_sas_cmd_hdr
*hdr
= slot
->cmd_hdr
;
1120 struct domain_device
*device
= task
->dev
;
1121 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1122 struct hisi_sas_port
*port
= slot
->port
;
1123 struct sas_ssp_task
*ssp_task
= &task
->ssp_task
;
1124 struct scsi_cmnd
*scsi_cmnd
= ssp_task
->cmd
;
1125 int has_data
= 0, rc
, priority
= is_tmf
;
1127 u32 dw1
= 0, dw2
= 0;
1129 hdr
->dw0
= cpu_to_le32((1 << CMD_HDR_RESP_REPORT_OFF
) |
1130 (2 << CMD_HDR_TLR_CTRL_OFF
) |
1131 (port
->id
<< CMD_HDR_PORT_OFF
) |
1132 (priority
<< CMD_HDR_PRIORITY_OFF
) |
1133 (1 << CMD_HDR_CMD_OFF
)); /* ssp */
1135 dw1
= 1 << CMD_HDR_VDTL_OFF
;
1137 dw1
|= 2 << CMD_HDR_FRAME_TYPE_OFF
;
1138 dw1
|= DIR_NO_DATA
<< CMD_HDR_DIR_OFF
;
1140 dw1
|= 1 << CMD_HDR_FRAME_TYPE_OFF
;
1141 switch (scsi_cmnd
->sc_data_direction
) {
1144 dw1
|= DIR_TO_DEVICE
<< CMD_HDR_DIR_OFF
;
1146 case DMA_FROM_DEVICE
:
1148 dw1
|= DIR_TO_INI
<< CMD_HDR_DIR_OFF
;
1151 dw1
&= ~CMD_HDR_DIR_MSK
;
1155 /* map itct entry */
1156 dw1
|= sas_dev
->device_id
<< CMD_HDR_DEV_ID_OFF
;
1157 hdr
->dw1
= cpu_to_le32(dw1
);
1159 dw2
= (((sizeof(struct ssp_command_iu
) + sizeof(struct ssp_frame_hdr
)
1160 + 3) / 4) << CMD_HDR_CFL_OFF
) |
1161 ((HISI_SAS_MAX_SSP_RESP_SZ
/ 4) << CMD_HDR_MRFL_OFF
) |
1162 (2 << CMD_HDR_SG_MOD_OFF
);
1163 hdr
->dw2
= cpu_to_le32(dw2
);
1165 hdr
->transfer_tags
= cpu_to_le32(slot
->idx
);
1168 rc
= prep_prd_sge_v2_hw(hisi_hba
, slot
, hdr
, task
->scatter
,
1174 hdr
->data_transfer_len
= cpu_to_le32(task
->total_xfer_len
);
1175 hdr
->cmd_table_addr
= cpu_to_le64(slot
->command_table_dma
);
1176 hdr
->sts_buffer_addr
= cpu_to_le64(slot
->status_buffer_dma
);
1178 buf_cmd
= slot
->command_table
+ sizeof(struct ssp_frame_hdr
);
1180 memcpy(buf_cmd
, &task
->ssp_task
.LUN
, 8);
1182 buf_cmd
[9] = task
->ssp_task
.task_attr
|
1183 (task
->ssp_task
.task_prio
<< 3);
1184 memcpy(buf_cmd
+ 12, task
->ssp_task
.cmd
->cmnd
,
1185 task
->ssp_task
.cmd
->cmd_len
);
1187 buf_cmd
[10] = tmf
->tmf
;
1189 case TMF_ABORT_TASK
:
1190 case TMF_QUERY_TASK
:
1192 (tmf
->tag_of_task_to_be_managed
>> 8) & 0xff;
1194 tmf
->tag_of_task_to_be_managed
& 0xff;
1204 static void sata_done_v2_hw(struct hisi_hba
*hisi_hba
, struct sas_task
*task
,
1205 struct hisi_sas_slot
*slot
)
1207 struct task_status_struct
*ts
= &task
->task_status
;
1208 struct ata_task_resp
*resp
= (struct ata_task_resp
*)ts
->buf
;
1209 struct dev_to_host_fis
*d2h
= slot
->status_buffer
+
1210 sizeof(struct hisi_sas_err_record
);
1212 resp
->frame_len
= sizeof(struct dev_to_host_fis
);
1213 memcpy(&resp
->ending_fis
[0], d2h
, sizeof(struct dev_to_host_fis
));
1215 ts
->buf_valid_size
= sizeof(*resp
);
1218 /* by default, task resp is complete */
1219 static void slot_err_v2_hw(struct hisi_hba
*hisi_hba
,
1220 struct sas_task
*task
,
1221 struct hisi_sas_slot
*slot
)
1223 struct task_status_struct
*ts
= &task
->task_status
;
1224 struct hisi_sas_err_record_v2
*err_record
= slot
->status_buffer
;
1225 u32 trans_tx_fail_type
= cpu_to_le32(err_record
->trans_tx_fail_type
);
1226 u32 trans_rx_fail_type
= cpu_to_le32(err_record
->trans_rx_fail_type
);
1227 u16 dma_tx_err_type
= cpu_to_le16(err_record
->dma_tx_err_type
);
1228 u16 sipc_rx_err_type
= cpu_to_le16(err_record
->sipc_rx_err_type
);
1229 u32 dma_rx_err_type
= cpu_to_le32(err_record
->dma_rx_err_type
);
1232 if (dma_rx_err_type
) {
1233 error
= ffs(dma_rx_err_type
)
1234 - 1 + DMA_RX_ERR_BASE
;
1235 } else if (sipc_rx_err_type
) {
1236 error
= ffs(sipc_rx_err_type
)
1237 - 1 + SIPC_RX_ERR_BASE
;
1238 } else if (dma_tx_err_type
) {
1239 error
= ffs(dma_tx_err_type
)
1240 - 1 + DMA_TX_ERR_BASE
;
1241 } else if (trans_rx_fail_type
) {
1242 error
= ffs(trans_rx_fail_type
)
1243 - 1 + TRANS_RX_FAIL_BASE
;
1244 } else if (trans_tx_fail_type
) {
1245 error
= ffs(trans_tx_fail_type
)
1246 - 1 + TRANS_TX_FAIL_BASE
;
1249 switch (task
->task_proto
) {
1250 case SAS_PROTOCOL_SSP
:
1253 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION
:
1255 ts
->stat
= SAS_OPEN_REJECT
;
1256 ts
->open_rej_reason
= SAS_OREJ_NO_DEST
;
1259 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED
:
1261 ts
->stat
= SAS_OPEN_REJECT
;
1262 ts
->open_rej_reason
= SAS_OREJ_PATH_BLOCKED
;
1265 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED
:
1267 ts
->stat
= SAS_OPEN_REJECT
;
1268 ts
->open_rej_reason
= SAS_OREJ_EPROTO
;
1271 case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED
:
1273 ts
->stat
= SAS_OPEN_REJECT
;
1274 ts
->open_rej_reason
= SAS_OREJ_CONN_RATE
;
1277 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION
:
1279 ts
->stat
= SAS_OPEN_REJECT
;
1280 ts
->open_rej_reason
= SAS_OREJ_BAD_DEST
;
1283 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD
:
1285 ts
->stat
= SAS_OPEN_REJECT
;
1286 ts
->open_rej_reason
= SAS_OREJ_RSVD_RETRY
;
1289 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION
:
1291 ts
->stat
= SAS_OPEN_REJECT
;
1292 ts
->open_rej_reason
= SAS_OREJ_WRONG_DEST
;
1295 case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION
:
1297 ts
->stat
= SAS_OPEN_REJECT
;
1298 ts
->open_rej_reason
= SAS_OREJ_UNKNOWN
;
1301 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER
:
1304 ts
->stat
= SAS_DEV_NO_RESPONSE
;
1307 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE
:
1309 ts
->stat
= SAS_PHY_DOWN
;
1312 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT
:
1314 ts
->stat
= SAS_OPEN_TO
;
1317 case DMA_RX_DATA_LEN_OVERFLOW
:
1319 ts
->stat
= SAS_DATA_OVERRUN
;
1323 case DMA_RX_DATA_LEN_UNDERFLOW
:
1324 case SIPC_RX_DATA_UNDERFLOW_ERR
:
1326 ts
->residual
= trans_tx_fail_type
;
1327 ts
->stat
= SAS_DATA_UNDERRUN
;
1330 case TRANS_TX_ERR_FRAME_TXED
:
1332 /* This will request a retry */
1333 ts
->stat
= SAS_QUEUE_FULL
;
1337 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS
:
1338 case TRANS_TX_ERR_PHY_NOT_ENABLE
:
1339 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER
:
1340 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT
:
1341 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED
:
1342 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT
:
1343 case TRANS_TX_ERR_WITH_BREAK_REQUEST
:
1344 case TRANS_TX_ERR_WITH_BREAK_RECEVIED
:
1345 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT
:
1346 case TRANS_TX_ERR_WITH_CLOSE_NORMAL
:
1347 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT
:
1348 case TRANS_TX_ERR_WITH_CLOSE_COMINIT
:
1349 case TRANS_TX_ERR_WITH_NAK_RECEVIED
:
1350 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT
:
1351 case TRANS_TX_ERR_WITH_IPTT_CONFLICT
:
1352 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT
:
1353 case TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR
:
1354 case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR
:
1355 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM
:
1356 case TRANS_RX_ERR_WITH_BREAK_TIMEOUT
:
1357 case TRANS_RX_ERR_WITH_BREAK_REQUEST
:
1358 case TRANS_RX_ERR_WITH_BREAK_RECEVIED
:
1359 case TRANS_RX_ERR_WITH_CLOSE_NORMAL
:
1360 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT
:
1361 case TRANS_RX_ERR_WITH_CLOSE_COMINIT
:
1362 case TRANS_RX_ERR_WITH_DATA_LEN0
:
1363 case TRANS_RX_ERR_WITH_BAD_HASH
:
1364 case TRANS_RX_XRDY_WLEN_ZERO_ERR
:
1365 case TRANS_RX_SSP_FRM_LEN_ERR
:
1366 case TRANS_RX_ERR_WITH_BAD_FRM_TYPE
:
1367 case DMA_TX_UNEXP_XFER_ERR
:
1368 case DMA_TX_UNEXP_RETRANS_ERR
:
1369 case DMA_TX_XFER_LEN_OVERFLOW
:
1370 case DMA_TX_XFER_OFFSET_ERR
:
1371 case DMA_RX_DATA_OFFSET_ERR
:
1372 case DMA_RX_UNEXP_NORM_RESP_ERR
:
1373 case DMA_RX_UNEXP_RDFRAME_ERR
:
1374 case DMA_RX_UNKNOWN_FRM_ERR
:
1376 ts
->stat
= SAS_OPEN_REJECT
;
1377 ts
->open_rej_reason
= SAS_OREJ_UNKNOWN
;
1385 case SAS_PROTOCOL_SMP
:
1386 ts
->stat
= SAM_STAT_CHECK_CONDITION
;
1389 case SAS_PROTOCOL_SATA
:
1390 case SAS_PROTOCOL_STP
:
1391 case SAS_PROTOCOL_SATA
| SAS_PROTOCOL_STP
:
1394 case TRANS_TX_OPEN_CNX_ERR_LOW_PHY_POWER
:
1395 case TRANS_TX_OPEN_CNX_ERR_PATHWAY_BLOCKED
:
1396 case TRANS_TX_OPEN_CNX_ERR_NO_DESTINATION
:
1398 ts
->resp
= SAS_TASK_UNDELIVERED
;
1399 ts
->stat
= SAS_DEV_NO_RESPONSE
;
1402 case TRANS_TX_OPEN_CNX_ERR_PROTOCOL_NOT_SUPPORTED
:
1403 case TRANS_TX_OPEN_CNX_ERR_CONNECTION_RATE_NOT_SUPPORTED
:
1404 case TRANS_TX_OPEN_CNX_ERR_BAD_DESTINATION
:
1405 case TRANS_TX_OPEN_CNX_ERR_BREAK_RCVD
:
1406 case TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION
:
1407 case TRANS_TX_OPEN_CNX_ERR_ZONE_VIOLATION
:
1408 case TRANS_TX_OPEN_CNX_ERR_STP_RESOURCES_BUSY
:
1410 ts
->stat
= SAS_OPEN_REJECT
;
1413 case TRANS_TX_OPEN_CNX_ERR_OPEN_TIMEOUT
:
1415 ts
->stat
= SAS_OPEN_TO
;
1418 case DMA_RX_DATA_LEN_OVERFLOW
:
1420 ts
->stat
= SAS_DATA_OVERRUN
;
1423 case TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS
:
1424 case TRANS_TX_ERR_PHY_NOT_ENABLE
:
1425 case TRANS_TX_OPEN_CNX_ERR_BY_OTHER
:
1426 case TRANS_TX_OPEN_CNX_ERR_AIP_TIMEOUT
:
1427 case TRANS_TX_OPEN_RETRY_ERR_THRESHOLD_REACHED
:
1428 case TRANS_TX_ERR_WITH_BREAK_TIMEOUT
:
1429 case TRANS_TX_ERR_WITH_BREAK_REQUEST
:
1430 case TRANS_TX_ERR_WITH_BREAK_RECEVIED
:
1431 case TRANS_TX_ERR_WITH_CLOSE_TIMEOUT
:
1432 case TRANS_TX_ERR_WITH_CLOSE_NORMAL
:
1433 case TRANS_TX_ERR_WITH_CLOSE_DWS_TIMEOUT
:
1434 case TRANS_TX_ERR_WITH_CLOSE_COMINIT
:
1435 case TRANS_TX_ERR_WITH_NAK_RECEVIED
:
1436 case TRANS_TX_ERR_WITH_ACK_NAK_TIMEOUT
:
1437 case TRANS_TX_ERR_WITH_CREDIT_TIMEOUT
:
1438 case TRANS_TX_ERR_WITH_WAIT_RECV_TIMEOUT
:
1439 case TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR
:
1440 case TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM
:
1441 case TRANS_RX_ERR_WITH_RXFIS_DECODE_ERROR
:
1442 case TRANS_RX_ERR_WITH_RXFIS_CRC_ERR
:
1443 case TRANS_RX_ERR_WITH_RXFRAME_LENGTH_OVERRUN
:
1444 case TRANS_RX_ERR_WITH_RXFIS_RX_SYNCP
:
1445 case TRANS_RX_ERR_WITH_CLOSE_NORMAL
:
1446 case TRANS_RX_ERR_WITH_CLOSE_PHY_DISABLE
:
1447 case TRANS_RX_ERR_WITH_CLOSE_DWS_TIMEOUT
:
1448 case TRANS_RX_ERR_WITH_CLOSE_COMINIT
:
1449 case TRANS_RX_ERR_WITH_DATA_LEN0
:
1450 case TRANS_RX_ERR_WITH_BAD_HASH
:
1451 case TRANS_RX_XRDY_WLEN_ZERO_ERR
:
1452 case TRANS_RX_SSP_FRM_LEN_ERR
:
1453 case SIPC_RX_FIS_STATUS_ERR_BIT_VLD
:
1454 case SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR
:
1455 case SIPC_RX_FIS_STATUS_BSY_BIT_ERR
:
1456 case SIPC_RX_WRSETUP_LEN_ODD_ERR
:
1457 case SIPC_RX_WRSETUP_LEN_ZERO_ERR
:
1458 case SIPC_RX_WRDATA_LEN_NOT_MATCH_ERR
:
1459 case SIPC_RX_SATA_UNEXP_FIS_ERR
:
1460 case DMA_RX_SATA_FRAME_TYPE_ERR
:
1461 case DMA_RX_UNEXP_RDFRAME_ERR
:
1462 case DMA_RX_PIO_DATA_LEN_ERR
:
1463 case DMA_RX_RDSETUP_STATUS_ERR
:
1464 case DMA_RX_RDSETUP_STATUS_DRQ_ERR
:
1465 case DMA_RX_RDSETUP_STATUS_BSY_ERR
:
1466 case DMA_RX_RDSETUP_LEN_ODD_ERR
:
1467 case DMA_RX_RDSETUP_LEN_ZERO_ERR
:
1468 case DMA_RX_RDSETUP_LEN_OVER_ERR
:
1469 case DMA_RX_RDSETUP_OFFSET_ERR
:
1470 case DMA_RX_RDSETUP_ACTIVE_ERR
:
1471 case DMA_RX_RDSETUP_ESTATUS_ERR
:
1472 case DMA_RX_UNKNOWN_FRM_ERR
:
1474 ts
->stat
= SAS_OPEN_REJECT
;
1479 ts
->stat
= SAS_PROTO_RESPONSE
;
1483 sata_done_v2_hw(hisi_hba
, task
, slot
);
1492 slot_complete_v2_hw(struct hisi_hba
*hisi_hba
, struct hisi_sas_slot
*slot
,
1495 struct sas_task
*task
= slot
->task
;
1496 struct hisi_sas_device
*sas_dev
;
1497 struct device
*dev
= &hisi_hba
->pdev
->dev
;
1498 struct task_status_struct
*ts
;
1499 struct domain_device
*device
;
1500 enum exec_status sts
;
1501 struct hisi_sas_complete_v2_hdr
*complete_queue
=
1502 hisi_hba
->complete_hdr
[slot
->cmplt_queue
];
1503 struct hisi_sas_complete_v2_hdr
*complete_hdr
=
1504 &complete_queue
[slot
->cmplt_queue_slot
];
1506 if (unlikely(!task
|| !task
->lldd_task
|| !task
->dev
))
1509 ts
= &task
->task_status
;
1511 sas_dev
= device
->lldd_dev
;
1513 task
->task_state_flags
&=
1514 ~(SAS_TASK_STATE_PENDING
| SAS_TASK_AT_INITIATOR
);
1515 task
->task_state_flags
|= SAS_TASK_STATE_DONE
;
1517 memset(ts
, 0, sizeof(*ts
));
1518 ts
->resp
= SAS_TASK_COMPLETE
;
1520 if (unlikely(!sas_dev
|| abort
)) {
1522 dev_dbg(dev
, "slot complete: port has not device\n");
1523 ts
->stat
= SAS_PHY_DOWN
;
1527 if ((complete_hdr
->dw0
& CMPLT_HDR_ERX_MSK
) &&
1528 (!(complete_hdr
->dw0
& CMPLT_HDR_RSPNS_XFRD_MSK
))) {
1530 slot_err_v2_hw(hisi_hba
, task
, slot
);
1531 if (unlikely(slot
->abort
)) {
1532 queue_work(hisi_hba
->wq
, &slot
->abort_slot
);
1533 /* immediately return and do not complete */
1539 switch (task
->task_proto
) {
1540 case SAS_PROTOCOL_SSP
:
1542 struct ssp_response_iu
*iu
= slot
->status_buffer
+
1543 sizeof(struct hisi_sas_err_record
);
1545 sas_ssp_task_response(dev
, task
, iu
);
1548 case SAS_PROTOCOL_SMP
:
1550 struct scatterlist
*sg_resp
= &task
->smp_task
.smp_resp
;
1553 ts
->stat
= SAM_STAT_GOOD
;
1554 to
= kmap_atomic(sg_page(sg_resp
));
1556 dma_unmap_sg(dev
, &task
->smp_task
.smp_resp
, 1,
1558 dma_unmap_sg(dev
, &task
->smp_task
.smp_req
, 1,
1560 memcpy(to
+ sg_resp
->offset
,
1561 slot
->status_buffer
+
1562 sizeof(struct hisi_sas_err_record
),
1563 sg_dma_len(sg_resp
));
1567 case SAS_PROTOCOL_SATA
:
1568 case SAS_PROTOCOL_STP
:
1569 case SAS_PROTOCOL_SATA
| SAS_PROTOCOL_STP
:
1571 ts
->stat
= SAM_STAT_GOOD
;
1572 sata_done_v2_hw(hisi_hba
, task
, slot
);
1576 ts
->stat
= SAM_STAT_CHECK_CONDITION
;
1580 if (!slot
->port
->port_attached
) {
1581 dev_err(dev
, "slot complete: port %d has removed\n",
1582 slot
->port
->sas_port
.id
);
1583 ts
->stat
= SAS_PHY_DOWN
;
1587 if (sas_dev
&& sas_dev
->running_req
)
1588 sas_dev
->running_req
--;
1590 hisi_sas_slot_task_free(hisi_hba
, task
, slot
);
1593 if (task
->task_done
)
1594 task
->task_done(task
);
1599 static u8
get_ata_protocol(u8 cmd
, int direction
)
1602 case ATA_CMD_FPDMA_WRITE
:
1603 case ATA_CMD_FPDMA_READ
:
1604 return SATA_PROTOCOL_FPDMA
;
1606 case ATA_CMD_ID_ATA
:
1607 case ATA_CMD_PMP_READ
:
1608 case ATA_CMD_READ_LOG_EXT
:
1609 case ATA_CMD_PIO_READ
:
1610 case ATA_CMD_PIO_READ_EXT
:
1611 case ATA_CMD_PMP_WRITE
:
1612 case ATA_CMD_WRITE_LOG_EXT
:
1613 case ATA_CMD_PIO_WRITE
:
1614 case ATA_CMD_PIO_WRITE_EXT
:
1615 return SATA_PROTOCOL_PIO
;
1618 case ATA_CMD_READ_EXT
:
1619 case ATA_CMD_READ_LOG_DMA_EXT
:
1621 case ATA_CMD_WRITE_EXT
:
1622 case ATA_CMD_WRITE_QUEUED
:
1623 case ATA_CMD_WRITE_LOG_DMA_EXT
:
1624 return SATA_PROTOCOL_DMA
;
1626 case ATA_CMD_DOWNLOAD_MICRO
:
1627 case ATA_CMD_DEV_RESET
:
1628 case ATA_CMD_CHK_POWER
:
1630 case ATA_CMD_FLUSH_EXT
:
1631 case ATA_CMD_VERIFY
:
1632 case ATA_CMD_VERIFY_EXT
:
1633 case ATA_CMD_SET_FEATURES
:
1634 case ATA_CMD_STANDBY
:
1635 case ATA_CMD_STANDBYNOW1
:
1636 return SATA_PROTOCOL_NONDATA
;
1638 if (direction
== DMA_NONE
)
1639 return SATA_PROTOCOL_NONDATA
;
1640 return SATA_PROTOCOL_PIO
;
1644 static int get_ncq_tag_v2_hw(struct sas_task
*task
, u32
*tag
)
1646 struct ata_queued_cmd
*qc
= task
->uldd_task
;
1649 if (qc
->tf
.command
== ATA_CMD_FPDMA_WRITE
||
1650 qc
->tf
.command
== ATA_CMD_FPDMA_READ
) {
1658 static int prep_ata_v2_hw(struct hisi_hba
*hisi_hba
,
1659 struct hisi_sas_slot
*slot
)
1661 struct sas_task
*task
= slot
->task
;
1662 struct domain_device
*device
= task
->dev
;
1663 struct domain_device
*parent_dev
= device
->parent
;
1664 struct hisi_sas_device
*sas_dev
= device
->lldd_dev
;
1665 struct hisi_sas_cmd_hdr
*hdr
= slot
->cmd_hdr
;
1666 struct hisi_sas_port
*port
= device
->port
->lldd_port
;
1668 int has_data
= 0, rc
= 0, hdr_tag
= 0;
1669 u32 dw1
= 0, dw2
= 0;
1673 hdr
->dw0
= cpu_to_le32(port
->id
<< CMD_HDR_PORT_OFF
);
1674 if (parent_dev
&& DEV_IS_EXPANDER(parent_dev
->dev_type
))
1675 hdr
->dw0
|= cpu_to_le32(3 << CMD_HDR_CMD_OFF
);
1677 hdr
->dw0
|= cpu_to_le32(4 << CMD_HDR_CMD_OFF
);
1680 switch (task
->data_dir
) {
1683 dw1
|= DIR_TO_DEVICE
<< CMD_HDR_DIR_OFF
;
1685 case DMA_FROM_DEVICE
:
1687 dw1
|= DIR_TO_INI
<< CMD_HDR_DIR_OFF
;
1690 dw1
&= ~CMD_HDR_DIR_MSK
;
1693 if (0 == task
->ata_task
.fis
.command
)
1694 dw1
|= 1 << CMD_HDR_RESET_OFF
;
1696 dw1
|= (get_ata_protocol(task
->ata_task
.fis
.command
, task
->data_dir
))
1697 << CMD_HDR_FRAME_TYPE_OFF
;
1698 dw1
|= sas_dev
->device_id
<< CMD_HDR_DEV_ID_OFF
;
1699 hdr
->dw1
= cpu_to_le32(dw1
);
1702 if (task
->ata_task
.use_ncq
&& get_ncq_tag_v2_hw(task
, &hdr_tag
)) {
1703 task
->ata_task
.fis
.sector_count
|= (u8
) (hdr_tag
<< 3);
1704 dw2
|= hdr_tag
<< CMD_HDR_NCQ_TAG_OFF
;
1707 dw2
|= (HISI_SAS_MAX_STP_RESP_SZ
/ 4) << CMD_HDR_CFL_OFF
|
1708 2 << CMD_HDR_SG_MOD_OFF
;
1709 hdr
->dw2
= cpu_to_le32(dw2
);
1712 hdr
->transfer_tags
= cpu_to_le32(slot
->idx
);
1715 rc
= prep_prd_sge_v2_hw(hisi_hba
, slot
, hdr
, task
->scatter
,
1722 hdr
->data_transfer_len
= cpu_to_le32(task
->total_xfer_len
);
1723 hdr
->cmd_table_addr
= cpu_to_le64(slot
->command_table_dma
);
1724 hdr
->sts_buffer_addr
= cpu_to_le64(slot
->status_buffer_dma
);
1726 buf_cmd
= slot
->command_table
;
1728 if (likely(!task
->ata_task
.device_control_reg_update
))
1729 task
->ata_task
.fis
.flags
|= 0x80; /* C=1: update ATA cmd reg */
1730 /* fill in command FIS */
1731 memcpy(buf_cmd
, &task
->ata_task
.fis
, sizeof(struct host_to_dev_fis
));
1736 static int phy_up_v2_hw(int phy_no
, struct hisi_hba
*hisi_hba
)
1739 u32 context
, port_id
, link_rate
, hard_phy_linkrate
;
1740 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1741 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
1742 struct device
*dev
= &hisi_hba
->pdev
->dev
;
1743 u32
*frame_rcvd
= (u32
*)sas_phy
->frame_rcvd
;
1744 struct sas_identify_frame
*id
= (struct sas_identify_frame
*)frame_rcvd
;
1746 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_PHY_ENA_MSK
, 1);
1748 /* Check for SATA dev */
1749 context
= hisi_sas_read32(hisi_hba
, PHY_CONTEXT
);
1750 if (context
& (1 << phy_no
))
1754 u32 port_state
= hisi_sas_read32(hisi_hba
, PORT_STATE
);
1756 port_id
= (port_state
& PORT_STATE_PHY8_PORT_NUM_MSK
) >>
1757 PORT_STATE_PHY8_PORT_NUM_OFF
;
1758 link_rate
= (port_state
& PORT_STATE_PHY8_CONN_RATE_MSK
) >>
1759 PORT_STATE_PHY8_CONN_RATE_OFF
;
1761 port_id
= hisi_sas_read32(hisi_hba
, PHY_PORT_NUM_MA
);
1762 port_id
= (port_id
>> (4 * phy_no
)) & 0xf;
1763 link_rate
= hisi_sas_read32(hisi_hba
, PHY_CONN_RATE
);
1764 link_rate
= (link_rate
>> (phy_no
* 4)) & 0xf;
1767 if (port_id
== 0xf) {
1768 dev_err(dev
, "phyup: phy%d invalid portid\n", phy_no
);
1773 for (i
= 0; i
< 6; i
++) {
1774 u32 idaf
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
1775 RX_IDAF_DWORD0
+ (i
* 4));
1776 frame_rcvd
[i
] = __swab32(idaf
);
1779 /* Get the linkrates */
1780 link_rate
= hisi_sas_read32(hisi_hba
, PHY_CONN_RATE
);
1781 link_rate
= (link_rate
>> (phy_no
* 4)) & 0xf;
1782 sas_phy
->linkrate
= link_rate
;
1783 hard_phy_linkrate
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
1785 phy
->maximum_linkrate
= hard_phy_linkrate
& 0xf;
1786 phy
->minimum_linkrate
= (hard_phy_linkrate
>> 4) & 0xf;
1788 sas_phy
->oob_mode
= SAS_OOB_MODE
;
1789 memcpy(sas_phy
->attached_sas_addr
, &id
->sas_addr
, SAS_ADDR_SIZE
);
1790 dev_info(dev
, "phyup: phy%d link_rate=%d\n", phy_no
, link_rate
);
1791 phy
->port_id
= port_id
;
1792 phy
->phy_type
&= ~(PORT_TYPE_SAS
| PORT_TYPE_SATA
);
1793 phy
->phy_type
|= PORT_TYPE_SAS
;
1794 phy
->phy_attached
= 1;
1795 phy
->identify
.device_type
= id
->dev_type
;
1796 phy
->frame_rcvd_size
= sizeof(struct sas_identify_frame
);
1797 if (phy
->identify
.device_type
== SAS_END_DEVICE
)
1798 phy
->identify
.target_port_protocols
=
1800 else if (phy
->identify
.device_type
!= SAS_PHY_UNUSED
)
1801 phy
->identify
.target_port_protocols
=
1803 queue_work(hisi_hba
->wq
, &phy
->phyup_ws
);
1806 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT0
,
1807 CHL_INT0_SL_PHY_ENABLE_MSK
);
1808 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_PHY_ENA_MSK
, 0);
1813 static int phy_down_v2_hw(int phy_no
, struct hisi_hba
*hisi_hba
)
1816 u32 phy_cfg
, phy_state
;
1818 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_NOT_RDY_MSK
, 1);
1820 phy_cfg
= hisi_sas_phy_read32(hisi_hba
, phy_no
, PHY_CFG
);
1822 phy_state
= hisi_sas_read32(hisi_hba
, PHY_STATE
);
1824 hisi_sas_phy_down(hisi_hba
, phy_no
, (phy_state
& 1 << phy_no
) ? 1 : 0);
1826 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT0
, CHL_INT0_NOT_RDY_MSK
);
1827 hisi_sas_phy_write32(hisi_hba
, phy_no
, PHYCTRL_NOT_RDY_MSK
, 0);
1832 static irqreturn_t
int_phy_updown_v2_hw(int irq_no
, void *p
)
1834 struct hisi_hba
*hisi_hba
= p
;
1837 irqreturn_t res
= IRQ_HANDLED
;
1839 irq_msk
= (hisi_sas_read32(hisi_hba
, HGC_INVLD_DQE_INFO
)
1840 >> HGC_INVLD_DQE_INFO_FB_CH0_OFF
) & 0x1ff;
1843 u32 irq_value
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
1846 if (irq_value
& CHL_INT0_SL_PHY_ENABLE_MSK
)
1848 if (phy_up_v2_hw(phy_no
, hisi_hba
)) {
1853 if (irq_value
& CHL_INT0_NOT_RDY_MSK
)
1855 if (phy_down_v2_hw(phy_no
, hisi_hba
)) {
1868 static void phy_bcast_v2_hw(int phy_no
, struct hisi_hba
*hisi_hba
)
1870 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[phy_no
];
1871 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
1872 struct sas_ha_struct
*sas_ha
= &hisi_hba
->sha
;
1873 unsigned long flags
;
1875 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_RX_BCAST_CHK_MSK
, 1);
1877 spin_lock_irqsave(&hisi_hba
->lock
, flags
);
1878 sas_ha
->notify_port_event(sas_phy
, PORTE_BROADCAST_RCVD
);
1879 spin_unlock_irqrestore(&hisi_hba
->lock
, flags
);
1881 hisi_sas_phy_write32(hisi_hba
, phy_no
, CHL_INT0
,
1882 CHL_INT0_SL_RX_BCST_ACK_MSK
);
1883 hisi_sas_phy_write32(hisi_hba
, phy_no
, SL_RX_BCAST_CHK_MSK
, 0);
1886 static irqreturn_t
int_chnl_int_v2_hw(int irq_no
, void *p
)
1888 struct hisi_hba
*hisi_hba
= p
;
1889 struct device
*dev
= &hisi_hba
->pdev
->dev
;
1890 u32 ent_msk
, ent_tmp
, irq_msk
;
1893 ent_msk
= hisi_sas_read32(hisi_hba
, ENT_INT_SRC_MSK3
);
1895 ent_msk
|= ENT_INT_SRC_MSK3_ENT95_MSK_MSK
;
1896 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK3
, ent_msk
);
1898 irq_msk
= (hisi_sas_read32(hisi_hba
, HGC_INVLD_DQE_INFO
) >>
1899 HGC_INVLD_DQE_INFO_FB_CH3_OFF
) & 0x1ff;
1902 if (irq_msk
& (1 << phy_no
)) {
1903 u32 irq_value0
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
1905 u32 irq_value1
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
1907 u32 irq_value2
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
1911 if (irq_value1
& (CHL_INT1_DMAC_RX_ECC_ERR_MSK
|
1912 CHL_INT1_DMAC_TX_ECC_ERR_MSK
))
1913 panic("%s: DMAC RX/TX ecc bad error! (0x%x)",
1914 dev_name(dev
), irq_value1
);
1916 hisi_sas_phy_write32(hisi_hba
, phy_no
,
1917 CHL_INT1
, irq_value1
);
1921 hisi_sas_phy_write32(hisi_hba
, phy_no
,
1922 CHL_INT2
, irq_value2
);
1926 if (irq_value0
& CHL_INT0_SL_RX_BCST_ACK_MSK
)
1927 phy_bcast_v2_hw(phy_no
, hisi_hba
);
1929 hisi_sas_phy_write32(hisi_hba
, phy_no
,
1930 CHL_INT0
, irq_value0
1931 & (~CHL_INT0_HOTPLUG_TOUT_MSK
)
1932 & (~CHL_INT0_SL_PHY_ENABLE_MSK
)
1933 & (~CHL_INT0_NOT_RDY_MSK
));
1936 irq_msk
&= ~(1 << phy_no
);
1940 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK3
, ent_tmp
);
1945 static irqreturn_t
cq_interrupt_v2_hw(int irq_no
, void *p
)
1947 struct hisi_sas_cq
*cq
= p
;
1948 struct hisi_hba
*hisi_hba
= cq
->hisi_hba
;
1949 struct hisi_sas_slot
*slot
;
1950 struct hisi_sas_itct
*itct
;
1951 struct hisi_sas_complete_v2_hdr
*complete_queue
;
1952 u32 irq_value
, rd_point
, wr_point
, dev_id
;
1955 complete_queue
= hisi_hba
->complete_hdr
[queue
];
1956 irq_value
= hisi_sas_read32(hisi_hba
, OQ_INT_SRC
);
1958 hisi_sas_write32(hisi_hba
, OQ_INT_SRC
, 1 << queue
);
1960 rd_point
= hisi_sas_read32(hisi_hba
, COMPL_Q_0_RD_PTR
+
1962 wr_point
= hisi_sas_read32(hisi_hba
, COMPL_Q_0_WR_PTR
+
1965 while (rd_point
!= wr_point
) {
1966 struct hisi_sas_complete_v2_hdr
*complete_hdr
;
1969 complete_hdr
= &complete_queue
[rd_point
];
1971 /* Check for NCQ completion */
1972 if (complete_hdr
->act
) {
1973 u32 act_tmp
= complete_hdr
->act
;
1974 int ncq_tag_count
= ffs(act_tmp
);
1976 dev_id
= (complete_hdr
->dw1
& CMPLT_HDR_DEV_ID_MSK
) >>
1977 CMPLT_HDR_DEV_ID_OFF
;
1978 itct
= &hisi_hba
->itct
[dev_id
];
1980 /* The NCQ tags are held in the itct header */
1981 while (ncq_tag_count
) {
1982 __le64
*ncq_tag
= &itct
->qw4_15
[0];
1985 iptt
= (ncq_tag
[ncq_tag_count
/ 5]
1986 >> (ncq_tag_count
% 5) * 12) & 0xfff;
1988 slot
= &hisi_hba
->slot_info
[iptt
];
1989 slot
->cmplt_queue_slot
= rd_point
;
1990 slot
->cmplt_queue
= queue
;
1991 slot_complete_v2_hw(hisi_hba
, slot
, 0);
1993 act_tmp
&= ~(1 << ncq_tag_count
);
1994 ncq_tag_count
= ffs(act_tmp
);
1997 iptt
= (complete_hdr
->dw1
) & CMPLT_HDR_IPTT_MSK
;
1998 slot
= &hisi_hba
->slot_info
[iptt
];
1999 slot
->cmplt_queue_slot
= rd_point
;
2000 slot
->cmplt_queue
= queue
;
2001 slot_complete_v2_hw(hisi_hba
, slot
, 0);
2004 if (++rd_point
>= HISI_SAS_QUEUE_SLOTS
)
2008 /* update rd_point */
2009 hisi_sas_write32(hisi_hba
, COMPL_Q_0_RD_PTR
+ (0x14 * queue
), rd_point
);
2013 static irqreturn_t
sata_int_v2_hw(int irq_no
, void *p
)
2015 struct hisi_sas_phy
*phy
= p
;
2016 struct hisi_hba
*hisi_hba
= phy
->hisi_hba
;
2017 struct asd_sas_phy
*sas_phy
= &phy
->sas_phy
;
2018 struct device
*dev
= &hisi_hba
->pdev
->dev
;
2019 struct hisi_sas_initial_fis
*initial_fis
;
2020 struct dev_to_host_fis
*fis
;
2021 u32 ent_tmp
, ent_msk
, ent_int
, port_id
, link_rate
, hard_phy_linkrate
;
2022 irqreturn_t res
= IRQ_HANDLED
;
2023 u8 attached_sas_addr
[SAS_ADDR_SIZE
] = {0};
2026 phy_no
= sas_phy
->id
;
2027 initial_fis
= &hisi_hba
->initial_fis
[phy_no
];
2028 fis
= &initial_fis
->fis
;
2030 offset
= 4 * (phy_no
/ 4);
2031 ent_msk
= hisi_sas_read32(hisi_hba
, ENT_INT_SRC_MSK1
+ offset
);
2032 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK1
+ offset
,
2033 ent_msk
| 1 << ((phy_no
% 4) * 8));
2035 ent_int
= hisi_sas_read32(hisi_hba
, ENT_INT_SRC1
+ offset
);
2036 ent_tmp
= ent_int
& (1 << (ENT_INT_SRC1_D2H_FIS_CH1_OFF
*
2038 ent_int
>>= ENT_INT_SRC1_D2H_FIS_CH1_OFF
* (phy_no
% 4);
2039 if ((ent_int
& ENT_INT_SRC1_D2H_FIS_CH0_MSK
) == 0) {
2040 dev_warn(dev
, "sata int: phy%d did not receive FIS\n", phy_no
);
2045 if (unlikely(phy_no
== 8)) {
2046 u32 port_state
= hisi_sas_read32(hisi_hba
, PORT_STATE
);
2048 port_id
= (port_state
& PORT_STATE_PHY8_PORT_NUM_MSK
) >>
2049 PORT_STATE_PHY8_PORT_NUM_OFF
;
2050 link_rate
= (port_state
& PORT_STATE_PHY8_CONN_RATE_MSK
) >>
2051 PORT_STATE_PHY8_CONN_RATE_OFF
;
2053 port_id
= hisi_sas_read32(hisi_hba
, PHY_PORT_NUM_MA
);
2054 port_id
= (port_id
>> (4 * phy_no
)) & 0xf;
2055 link_rate
= hisi_sas_read32(hisi_hba
, PHY_CONN_RATE
);
2056 link_rate
= (link_rate
>> (phy_no
* 4)) & 0xf;
2059 if (port_id
== 0xf) {
2060 dev_err(dev
, "sata int: phy%d invalid portid\n", phy_no
);
2065 sas_phy
->linkrate
= link_rate
;
2066 hard_phy_linkrate
= hisi_sas_phy_read32(hisi_hba
, phy_no
,
2068 phy
->maximum_linkrate
= hard_phy_linkrate
& 0xf;
2069 phy
->minimum_linkrate
= (hard_phy_linkrate
>> 4) & 0xf;
2071 sas_phy
->oob_mode
= SATA_OOB_MODE
;
2072 /* Make up some unique SAS address */
2073 attached_sas_addr
[0] = 0x50;
2074 attached_sas_addr
[7] = phy_no
;
2075 memcpy(sas_phy
->attached_sas_addr
, attached_sas_addr
, SAS_ADDR_SIZE
);
2076 memcpy(sas_phy
->frame_rcvd
, fis
, sizeof(struct dev_to_host_fis
));
2077 dev_info(dev
, "sata int phyup: phy%d link_rate=%d\n", phy_no
, link_rate
);
2078 phy
->phy_type
&= ~(PORT_TYPE_SAS
| PORT_TYPE_SATA
);
2079 phy
->port_id
= port_id
;
2080 phy
->phy_type
|= PORT_TYPE_SATA
;
2081 phy
->phy_attached
= 1;
2082 phy
->identify
.device_type
= SAS_SATA_DEV
;
2083 phy
->frame_rcvd_size
= sizeof(struct dev_to_host_fis
);
2084 phy
->identify
.target_port_protocols
= SAS_PROTOCOL_SATA
;
2085 queue_work(hisi_hba
->wq
, &phy
->phyup_ws
);
2088 hisi_sas_write32(hisi_hba
, ENT_INT_SRC1
+ offset
, ent_tmp
);
2089 hisi_sas_write32(hisi_hba
, ENT_INT_SRC_MSK1
+ offset
, ent_msk
);
2094 static irq_handler_t phy_interrupts
[HISI_SAS_PHY_INT_NR
] = {
2095 int_phy_updown_v2_hw
,
2100 * There is a limitation in the hip06 chipset that we need
2101 * to map in all mbigen interrupts, even if they are not used.
2103 static int interrupt_init_v2_hw(struct hisi_hba
*hisi_hba
)
2105 struct platform_device
*pdev
= hisi_hba
->pdev
;
2106 struct device
*dev
= &pdev
->dev
;
2107 int i
, irq
, rc
, irq_map
[128];
2110 for (i
= 0; i
< 128; i
++)
2111 irq_map
[i
] = platform_get_irq(pdev
, i
);
2113 for (i
= 0; i
< HISI_SAS_PHY_INT_NR
; i
++) {
2116 irq
= irq_map
[idx
+ 1]; /* Phy up/down is irq1 */
2118 dev_err(dev
, "irq init: fail map phy interrupt %d\n",
2123 rc
= devm_request_irq(dev
, irq
, phy_interrupts
[i
], 0,
2124 DRV_NAME
" phy", hisi_hba
);
2126 dev_err(dev
, "irq init: could not request "
2127 "phy interrupt %d, rc=%d\n",
2133 for (i
= 0; i
< hisi_hba
->n_phy
; i
++) {
2134 struct hisi_sas_phy
*phy
= &hisi_hba
->phy
[i
];
2135 int idx
= i
+ 72; /* First SATA interrupt is irq72 */
2139 dev_err(dev
, "irq init: fail map phy interrupt %d\n",
2144 rc
= devm_request_irq(dev
, irq
, sata_int_v2_hw
, 0,
2145 DRV_NAME
" sata", phy
);
2147 dev_err(dev
, "irq init: could not request "
2148 "sata interrupt %d, rc=%d\n",
2154 for (i
= 0; i
< hisi_hba
->queue_count
; i
++) {
2155 int idx
= i
+ 96; /* First cq interrupt is irq96 */
2160 "irq init: could not map cq interrupt %d\n",
2164 rc
= devm_request_irq(dev
, irq
, cq_interrupt_v2_hw
, 0,
2165 DRV_NAME
" cq", &hisi_hba
->cq
[i
]);
2168 "irq init: could not request cq interrupt %d, rc=%d\n",
/*
 * Top-level v2 init: bring up the controller registers, wire up all
 * interrupts, then start the phys. Returns 0 on success or a negative
 * errno from a failed init stage.
 */
static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)
{
	int rc;

	rc = hw_init_v2_hw(hisi_hba);
	if (rc)
		return rc;

	rc = interrupt_init_v2_hw(hisi_hba);
	if (rc)
		return rc;

	phys_init_v2_hw(hisi_hba);

	return 0;
}
2194 static const struct hisi_sas_hw hisi_sas_v2_hw
= {
2195 .hw_init
= hisi_sas_v2_init
,
2196 .setup_itct
= setup_itct_v2_hw
,
2197 .slot_index_alloc
= slot_index_alloc_quirk_v2_hw
,
2198 .sl_notify
= sl_notify_v2_hw
,
2199 .get_wideport_bitmap
= get_wideport_bitmap_v2_hw
,
2200 .free_device
= free_device_v2_hw
,
2201 .prep_smp
= prep_smp_v2_hw
,
2202 .prep_ssp
= prep_ssp_v2_hw
,
2203 .prep_stp
= prep_ata_v2_hw
,
2204 .get_free_slot
= get_free_slot_v2_hw
,
2205 .start_delivery
= start_delivery_v2_hw
,
2206 .slot_complete
= slot_complete_v2_hw
,
2207 .phy_enable
= enable_phy_v2_hw
,
2208 .phy_disable
= disable_phy_v2_hw
,
2209 .phy_hard_reset
= phy_hard_reset_v2_hw
,
2210 .max_command_entries
= HISI_SAS_COMMAND_ENTRIES_V2_HW
,
2211 .complete_hdr_size
= sizeof(struct hisi_sas_complete_v2_hdr
),
2214 static int hisi_sas_v2_probe(struct platform_device
*pdev
)
2216 return hisi_sas_probe(pdev
, &hisi_sas_v2_hw
);
/* Platform-bus remove: core library handles all teardown. */
static int hisi_sas_v2_remove(struct platform_device *pdev)
{
	return hisi_sas_remove(pdev);
}
2224 static const struct of_device_id sas_v2_of_match
[] = {
2225 { .compatible
= "hisilicon,hip06-sas-v2",},
2228 MODULE_DEVICE_TABLE(of
, sas_v2_of_match
);
2230 static struct platform_driver hisi_sas_v2_driver
= {
2231 .probe
= hisi_sas_v2_probe
,
2232 .remove
= hisi_sas_v2_remove
,
2235 .of_match_table
= sas_v2_of_match
,
/* Standard module boilerplate: register/unregister the platform driver. */
module_platform_driver(hisi_sas_v2_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller v2 hw driver");
MODULE_ALIAS("platform:" DRV_NAME);