/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember that a couple of workarounds (one related to
  PCI-X) are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

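	/*
	 * Worked example of the sizes above: 32 CRQBs * 32B = 1KB,
	 * 32 CRPBs * 8B = 256B, and 176 ePRDs * 16B = 2816B, so
	 * MV_PORT_PRIV_DMA_SZ = 1024 + 256 + 2816 = 4096B (4KB),
	 * matching the comment above.
	 */
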
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

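	/*
	 * Example: port 5 maps to host controller 5 >> MV_PORT_HC_SHIFT == 1
	 * and hard port 5 & MV_PORT_MASK == 1 on that controller.
	 */
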
	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

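/*
 * Note on the masks above: the CRQB queue is 1KB-aligned, so its low
 * 10 address bits are zero and EDMA_REQ_Q_BASE_LO_MASK (0xfffffc00)
 * keeps only the base; likewise the 256B-aligned CRPB queue uses
 * EDMA_RSP_Q_BASE_LO_MASK (0xffffff00).  The masked-off low bits are
 * where mv_set_edma_ptrs() packs the queue in/out pointers.
 */
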
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);

static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

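/*
 * Usage note: writelfl() is used below wherever a register write must
 * be known to have reached the chip before the driver proceeds (for
 * example, when clearing an IRQ cause register); the read-back forces
 * any posted PCI write to complete first.
 */
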
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

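/*
 * Worked example: for port 5 (HC 1, hard port 1), this resolves to
 * base + 0x20000 + 1*0x10000 + 0x2000 + 1*0x2000 = base + 0x34000.
 */
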
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

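/*
 * Worked example of the pointer packing above: with req_idx == 7 the
 * request in-pointer write is (7 & 0x1f) << 5 == 0xe0, placing the
 * index in bits 9:5 of EDMA_REQ_Q_IN_PTR while bits 31:10 hold the
 * 1KB-aligned queue base address.
 */
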
/**
 *      mv_start_dma - Enable eDMA engine
 *      @port_mmio: port base address
 *      @hpriv: host private data
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *port_mmio, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

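/*
 * Example mapping (assuming the standard libata SCR numbering, with
 * SCR_STATUS == 0): SCR_STATUS, SCR_ERROR and SCR_CONTROL land at
 * 0x300 + reg*4, so SCR_CONTROL (2) resolves to 0x308, while
 * SCR_ACTIVE sits apart at 0x350.
 */
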
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

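/*
 * Worked example of the splitting above: a segment at DMA address
 * 0x1fff0 with length 0x20000 first emits an ePRD of length
 * 0x10000 - 0xfff0 = 0x10 bytes, then continues from 0x20000, so no
 * single ePRD ever crosses a 64KB boundary.  This potential doubling
 * of entries is consistent with the scsi_host_templates advertising
 * an sg_tablesize of MV_MAX_SG_CT / 2.
 */
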
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		  (last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

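/*
 * Layout of each packed halfword: register value in bits 7:0, register
 * address in bits 10:8, the 0x2 control code (CRQB_CMD_CS) in bits
 * 12:11, and CRQB_CMD_LAST in bit 15.  For instance, packing value
 * 0x01 for taskfile register address 2 as a non-last word gives
 * 0x1000 | 0x200 | 0x01 = 0x1201.
 */
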
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected queued command, or NULL
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
bdd4ddde
JG
1653 have_err_bits = ((PORT0_ERR << shift) & relevant);
1654
1655 if (unlikely(have_err_bits)) {
1656 struct ata_queued_cmd *qc;
8b260248 1657
9af5c9c9 1658 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1659 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1660 continue;
1661
1662 mv_err_intr(ap, qc);
1663 continue;
1664 }
1665
1666 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1667
1668 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1669 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1670 mv_intr_edma(ap);
1671 } else {
1672 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1673 mv_intr_pio(ap);
20f733e7
BR
1674 }
1675 }
1676 VPRINTK("EXIT\n");
1677}
1678
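/*
 * [sketch, not driver code]  The relevant-bit math above in a
 * standalone form: two main-cause bits per port, with one extra
 * shift past bit 8 for ports on the second host controller.  This
 * assumes PORT0_ERR is bit 0, as the "(PORT0_ERR << shift)" use
 * suggests; MV_PORTS_PER_HC is 4 as in this driver.
 */
#include <assert.h>
#include <stdio.h>

#define SK_PORTS_PER_HC 4u
#define SK_PORT0_ERR    (1u << 0)       /* assumed value of PORT0_ERR */

static unsigned int sk_err_bit(unsigned int port)
{
    unsigned int shift = port << 1;     /* (port * 2) */

    if (port >= SK_PORTS_PER_HC)
        shift++;                        /* skip bit 8 */
    return SK_PORT0_ERR << shift;
}

int main(void)
{
    assert(sk_err_bit(0) == (1u << 0));
    assert(sk_err_bit(3) == (1u << 6));
    assert(sk_err_bit(4) == (1u << 9)); /* hops over bit 8 */
    for (unsigned int port = 0; port < 8; port++)
        printf("port %u -> err bit 0x%08x\n", port, sk_err_bit(port));
    return 0;
}
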
bdd4ddde
JG
1679static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1680{
02a121da 1681 struct mv_host_priv *hpriv = host->private_data;
bdd4ddde
JG
1682 struct ata_port *ap;
1683 struct ata_queued_cmd *qc;
1684 struct ata_eh_info *ehi;
1685 unsigned int i, err_mask, printed = 0;
1686 u32 err_cause;
1687
02a121da 1688 err_cause = readl(mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1689
1690 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1691 err_cause);
1692
1693 DPRINTK("All regs @ PCI error\n");
1694 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1695
02a121da 1696 writelfl(0, mmio + hpriv->irq_cause_ofs);
bdd4ddde
JG
1697
1698 for (i = 0; i < host->n_ports; i++) {
1699 ap = host->ports[i];
936fd732 1700 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1701 ehi = &ap->link.eh_info;
bdd4ddde
JG
1702 ata_ehi_clear_desc(ehi);
1703 if (!printed++)
1704 ata_ehi_push_desc(ehi,
1705 "PCI err cause 0x%08x", err_cause);
1706 err_mask = AC_ERR_HOST_BUS;
1707 ehi->action = ATA_EH_HARDRESET;
9af5c9c9 1708 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1709 if (qc)
1710 qc->err_mask |= err_mask;
1711 else
1712 ehi->err_mask |= err_mask;
1713
1714 ata_port_freeze(ap);
1715 }
1716 }
1717}
1718
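/*
 * [sketch, not driver code]  The attribution rule mv_pci_error()
 * applies per port, in miniature: a host-bus error is charged to the
 * in-flight command when one exists, and parked on the port's EH
 * info otherwise, so error handling fires either way.  The types and
 * the error-bit value are hypothetical stand-ins for libata's
 * struct ata_queued_cmd / ata_eh_info and AC_ERR_HOST_BUS.
 */
#include <assert.h>
#include <stddef.h>

#define SK_ERR_HOST_BUS (1u << 5)   /* stand-in for AC_ERR_HOST_BUS */

struct sk_qc  { unsigned int err_mask; };
struct sk_ehi { unsigned int err_mask; };

static void sk_charge(struct sk_qc *active_qc, struct sk_ehi *ehi)
{
    if (active_qc)
        active_qc->err_mask |= SK_ERR_HOST_BUS;
    else
        ehi->err_mask |= SK_ERR_HOST_BUS;
}

int main(void)
{
    struct sk_qc qc = { 0 };
    struct sk_ehi ehi = { 0 };

    sk_charge(&qc, &ehi);           /* command in flight */
    sk_charge(NULL, &ehi);          /* idle port */
    assert(qc.err_mask == SK_ERR_HOST_BUS);
    assert(ehi.err_mask == SK_ERR_HOST_BUS);
    return 0;
}
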
05b308e1 1719/**
c5d3e45a 1720 * mv_interrupt - Main interrupt event handler
05b308e1
BR
1721 * @irq: unused
1722 * @dev_instance: private data; in this case the host structure
05b308e1
BR
1723 *
 1724 * Read the read-only main interrupt cause register to determine
 1725 * if any host controllers have pending interrupts.  If so, call
 1726 * the lower-level routine to handle them.  Also check for PCI
 1727 * errors, which are reported only here.
1728 *
8b260248 1729 * LOCKING:
cca3974e 1730 * This routine holds the host lock while processing pending
05b308e1
BR
1731 * interrupts.
1732 */
7d12e780 1733static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1734{
cca3974e 1735 struct ata_host *host = dev_instance;
20f733e7 1736 unsigned int hc, handled = 0, n_hcs;
0d5ff566 1737 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
646a4da5 1738 u32 irq_stat, irq_mask;
20f733e7 1739
646a4da5 1740 spin_lock(&host->lock);
20f733e7 1741 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
646a4da5 1742 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
20f733e7
BR
1743
1744 /* check the cases where we either have nothing pending or have read
1745 * a bogus register value which can indicate HW removal or PCI fault
1746 */
646a4da5
ML
1747 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1748 goto out_unlock;
20f733e7 1749
cca3974e 1750 n_hcs = mv_get_hc_count(host->ports[0]->flags);
20f733e7 1751
bdd4ddde
JG
1752 if (unlikely(irq_stat & PCI_ERR)) {
1753 mv_pci_error(host, mmio);
1754 handled = 1;
1755 goto out_unlock; /* skip all other HC irq handling */
1756 }
1757
20f733e7
BR
1758 for (hc = 0; hc < n_hcs; hc++) {
1759 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1760 if (relevant) {
cca3974e 1761 mv_host_intr(host, relevant, hc);
bdd4ddde 1762 handled = 1;
20f733e7
BR
1763 }
1764 }
615ab953 1765
bdd4ddde 1766out_unlock:
cca3974e 1767 spin_unlock(&host->lock);
20f733e7
BR
1768
1769 return IRQ_RETVAL(handled);
1770}
1771
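/*
 * [sketch, not driver code]  How mv_interrupt() slices the main
 * cause register per host controller.  The HC0_IRQ_PEND/HC_SHIFT
 * values follow the driver's bits 0-8 = HC0, bits 9-17 = HC1 layout
 * (an assumption here: those enum values are defined elsewhere in
 * this file, outside this excerpt).
 */
#include <stdio.h>

#define SK_HC0_IRQ_PEND 0x1ffu      /* assumed: bits 0-8, HC0's ports */
#define SK_HC_SHIFT     9           /* assumed: bits 9-17, HC1's ports */

int main(void)
{
    unsigned int irq_stat = (1u << 3) | (1u << 12); /* hits both HCs */

    for (unsigned int hc = 0; hc < 2; hc++) {
        unsigned int relevant =
            irq_stat & (SK_HC0_IRQ_PEND << (hc * SK_HC_SHIFT));
        if (relevant)
            printf("HC%u needs service (relevant=0x%05x)\n",
                   hc, relevant);
    }
    return 0;
}
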
c9d39130
JG
1772static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1773{
1774 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1775 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1776
1777 return hc_mmio + ofs;
1778}
1779
1780static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1781{
1782 unsigned int ofs;
1783
1784 switch (sc_reg_in) {
1785 case SCR_STATUS:
1786 case SCR_ERROR:
1787 case SCR_CONTROL:
1788 ofs = sc_reg_in * sizeof(u32);
1789 break;
1790 default:
1791 ofs = 0xffffffffU;
1792 break;
1793 }
1794 return ofs;
1795}
1796
da3dbb17 1797static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1798{
0d5ff566
TH
1799 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1800 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1801 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1802
da3dbb17
TH
1803 if (ofs != 0xffffffffU) {
1804 *val = readl(addr + ofs);
1805 return 0;
1806 } else
1807 return -EINVAL;
c9d39130
JG
1808}
1809
da3dbb17 1810static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1811{
0d5ff566
TH
1812 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1813 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1814 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1815
da3dbb17 1816 if (ofs != 0xffffffffU) {
0d5ff566 1817 writelfl(val, addr + ofs);
da3dbb17
TH
1818 return 0;
1819 } else
1820 return -EINVAL;
c9d39130
JG
1821}
1822
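/*
 * [sketch, not driver code]  The 5xxx SCR address math from
 * mv5_phy_base()/mv5_scr_offset() above: each hard port's PHY block
 * starts at (hardport + 1) * 0x100 inside its HC window, and
 * SStatus/SError/SControl occupy consecutive u32 slots (libata's
 * SCR_STATUS=0, SCR_ERROR=1, SCR_CONTROL=2).  No hardware is
 * touched; this just checks the arithmetic.
 */
#include <assert.h>
#include <stdio.h>

enum sk_scr { SK_SCR_STATUS = 0, SK_SCR_ERROR = 1, SK_SCR_CONTROL = 2 };

static unsigned long sk_phy_off(unsigned int hardport)
{
    return (hardport + 1) * 0x100UL;    /* 0x100, 0x200, 0x300, 0x400 */
}

static unsigned int sk_scr_off(enum sk_scr reg)
{
    return (unsigned int)(reg * sizeof(unsigned int));  /* u32 slots */
}

int main(void)
{
    assert(sk_phy_off(0) == 0x100 && sk_phy_off(3) == 0x400);
    assert(sk_scr_off(SK_SCR_CONTROL) == 8);
    printf("hardport 2 SError at HC base + 0x%lx\n",
           sk_phy_off(2) + sk_scr_off(SK_SCR_ERROR));
    return 0;
}
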
522479fb
JG
1823static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1824{
522479fb
JG
1825 int early_5080;
1826
44c10138 1827 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1828
1829 if (!early_5080) {
1830 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1831 tmp |= (1 << 0);
1832 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1833 }
1834
1835 mv_reset_pci_bus(pdev, mmio);
1836}
1837
1838static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1839{
1840 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1841}
1842
47c2b677 1843static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1844 void __iomem *mmio)
1845{
c9d39130
JG
1846 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1847 u32 tmp;
1848
1849 tmp = readl(phy_mmio + MV5_PHY_MODE);
1850
1851 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1852 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1853}
1854
47c2b677 1855static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1856{
522479fb
JG
1857 u32 tmp;
1858
1859 writel(0, mmio + MV_GPIO_PORT_CTL);
1860
1861 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1862
1863 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1864 tmp |= ~(1 << 0);
1865 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1866}
1867
2a47ce06
JG
1868static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1869 unsigned int port)
bca1c4eb 1870{
c9d39130
JG
1871 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1872 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1873 u32 tmp;
1874 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1875
1876 if (fix_apm_sq) {
1877 tmp = readl(phy_mmio + MV5_LT_MODE);
1878 tmp |= (1 << 19);
1879 writel(tmp, phy_mmio + MV5_LT_MODE);
1880
1881 tmp = readl(phy_mmio + MV5_PHY_CTL);
1882 tmp &= ~0x3;
1883 tmp |= 0x1;
1884 writel(tmp, phy_mmio + MV5_PHY_CTL);
1885 }
1886
1887 tmp = readl(phy_mmio + MV5_PHY_MODE);
1888 tmp &= ~mask;
1889 tmp |= hpriv->signal[port].pre;
1890 tmp |= hpriv->signal[port].amps;
1891 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1892}
1893
c9d39130
JG
1894
1895#undef ZERO
1896#define ZERO(reg) writel(0, port_mmio + (reg))
1897static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1898 unsigned int port)
1899{
1900 void __iomem *port_mmio = mv_port_base(mmio, port);
1901
1902 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1903
1904 mv_channel_reset(hpriv, mmio, port);
1905
1906 ZERO(0x028); /* command */
1907 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1908 ZERO(0x004); /* timer */
1909 ZERO(0x008); /* irq err cause */
1910 ZERO(0x00c); /* irq err mask */
1911 ZERO(0x010); /* rq bah */
1912 ZERO(0x014); /* rq inp */
1913 ZERO(0x018); /* rq outp */
1914 ZERO(0x01c); /* respq bah */
1915 ZERO(0x024); /* respq outp */
1916 ZERO(0x020); /* respq inp */
1917 ZERO(0x02c); /* test control */
1918 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1919}
1920#undef ZERO
1921
1922#define ZERO(reg) writel(0, hc_mmio + (reg))
1923static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1924 unsigned int hc)
47c2b677 1925{
c9d39130
JG
1926 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1927 u32 tmp;
1928
1929 ZERO(0x00c);
1930 ZERO(0x010);
1931 ZERO(0x014);
1932 ZERO(0x018);
1933
1934 tmp = readl(hc_mmio + 0x20);
1935 tmp &= 0x1c1c1c1c;
1936 tmp |= 0x03030303;
1937 writel(tmp, hc_mmio + 0x20);
1938}
1939#undef ZERO
1940
1941static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1942 unsigned int n_hc)
1943{
1944 unsigned int hc, port;
1945
1946 for (hc = 0; hc < n_hc; hc++) {
1947 for (port = 0; port < MV_PORTS_PER_HC; port++)
1948 mv5_reset_hc_port(hpriv, mmio,
1949 (hc * MV_PORTS_PER_HC) + port);
1950
1951 mv5_reset_one_hc(hpriv, mmio, hc);
1952 }
1953
1954 return 0;
47c2b677
JG
1955}
1956
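/*
 * [sketch, not driver code]  The ZERO() macro pattern used above:
 * one macro name is re-#defined against whichever MMIO base is in
 * scope (port, HC, or chip level), then #undef'd, which keeps the
 * long register-clearing lists terse.  Plain memory stands in for
 * MMIO here; offsets are illustrative.
 */
#include <assert.h>
#include <string.h>

static unsigned int fake_regs[0x40];

#define ZERO(reg) (fake_regs[(reg) / 4] = 0)
static void clear_request_queue(void)
{
    ZERO(0x10);     /* rq bah  */
    ZERO(0x14);     /* rq inp  */
    ZERO(0x18);     /* rq outp */
}
#undef ZERO

int main(void)
{
    memset(fake_regs, 0xff, sizeof(fake_regs));
    clear_request_queue();
    assert(fake_regs[0x10 / 4] == 0 && fake_regs[0x18 / 4] == 0);
    return 0;
}
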
101ffae2
JG
1957#undef ZERO
1958#define ZERO(reg) writel(0, mmio + (reg))
1959static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1960{
02a121da
ML
1961 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1962 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
1963 u32 tmp;
1964
1965 tmp = readl(mmio + MV_PCI_MODE);
1966 tmp &= 0xff00ffff;
1967 writel(tmp, mmio + MV_PCI_MODE);
1968
1969 ZERO(MV_PCI_DISC_TIMER);
1970 ZERO(MV_PCI_MSI_TRIGGER);
1971 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1972 ZERO(HC_MAIN_IRQ_MASK_OFS);
1973 ZERO(MV_PCI_SERR_MASK);
02a121da
ML
1974 ZERO(hpriv->irq_cause_ofs);
1975 ZERO(hpriv->irq_mask_ofs);
101ffae2
JG
1976 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1977 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1978 ZERO(MV_PCI_ERR_ATTRIBUTE);
1979 ZERO(MV_PCI_ERR_COMMAND);
1980}
1981#undef ZERO
1982
1983static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1984{
1985 u32 tmp;
1986
1987 mv5_reset_flash(hpriv, mmio);
1988
1989 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1990 tmp &= 0x3;
1991 tmp |= (1 << 5) | (1 << 6);
1992 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1993}
1994
1995/**
1996 * mv6_reset_hc - Perform the 6xxx global soft reset
 1997 * @mmio: base address of the HBA
 * @hpriv: host private data (unused by this routine)
 * @n_hc: number of host controllers (unused by this routine)
1998 *
1999 * This routine only applies to 6xxx parts.
2000 *
2001 * LOCKING:
2002 * Inherited from caller.
2003 */
c9d39130
JG
2004static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2005 unsigned int n_hc)
101ffae2
JG
2006{
2007 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2008 int i, rc = 0;
2009 u32 t;
2010
2011 /* Following procedure defined in PCI "main command and status
2012 * register" table.
2013 */
2014 t = readl(reg);
2015 writel(t | STOP_PCI_MASTER, reg);
2016
2017 for (i = 0; i < 1000; i++) {
2018 udelay(1);
2019 t = readl(reg);
2dcb407e 2020 if (PCI_MASTER_EMPTY & t)
101ffae2 2021 break;
101ffae2
JG
2022 }
2023 if (!(PCI_MASTER_EMPTY & t)) {
2024 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2025 rc = 1;
2026 goto done;
2027 }
2028
2029 /* set reset */
2030 i = 5;
2031 do {
2032 writel(t | GLOB_SFT_RST, reg);
2033 t = readl(reg);
2034 udelay(1);
2035 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2036
2037 if (!(GLOB_SFT_RST & t)) {
2038 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2039 rc = 1;
2040 goto done;
2041 }
2042
2043 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2044 i = 5;
2045 do {
2046 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2047 t = readl(reg);
2048 udelay(1);
2049 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2050
2051 if (GLOB_SFT_RST & t) {
2052 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2053 rc = 1;
2054 }
2055done:
2056 return rc;
2057}
2058
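/*
 * [sketch, not driver code]  The bounded-poll idiom mv6_reset_hc()
 * uses three times above: re-read a status word until the bit of
 * interest settles or the retry budget runs out, then check which
 * happened.  sk_read() fakes a register that settles after a few
 * reads; the driver polls real MMIO with udelay(1) between reads.
 */
#include <stdio.h>

#define SK_READY (1u << 0)

static unsigned int sk_read(void)
{
    static int reads;
    return (++reads >= 4) ? SK_READY : 0;   /* settles on 4th read */
}

int main(void)
{
    unsigned int t = 0;
    int i;

    for (i = 0; i < 1000; i++) {
        t = sk_read();
        if (t & SK_READY)
            break;
        /* udelay(1) would go here in the driver */
    }
    if (!(t & SK_READY)) {
        fprintf(stderr, "device won't settle\n");
        return 1;
    }
    printf("settled after %d polls\n", i + 1);
    return 0;
}
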
47c2b677 2059static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2060 void __iomem *mmio)
2061{
2062 void __iomem *port_mmio;
2063 u32 tmp;
2064
ba3fe8fb
JG
2065 tmp = readl(mmio + MV_RESET_CFG);
2066 if ((tmp & (1 << 0)) == 0) {
47c2b677 2067 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2068 hpriv->signal[idx].pre = 0x1 << 5;
2069 return;
2070 }
2071
2072 port_mmio = mv_port_base(mmio, idx);
2073 tmp = readl(port_mmio + PHY_MODE2);
2074
2075 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2076 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2077}
2078
47c2b677 2079static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2080{
47c2b677 2081 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2082}
2083
c9d39130 2084static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2085 unsigned int port)
bca1c4eb 2086{
c9d39130
JG
2087 void __iomem *port_mmio = mv_port_base(mmio, port);
2088
bca1c4eb 2089 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2090 int fix_phy_mode2 =
2091 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2092 int fix_phy_mode4 =
47c2b677
JG
2093 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2094 u32 m2, tmp;
2095
2096 if (fix_phy_mode2) {
2097 m2 = readl(port_mmio + PHY_MODE2);
2098 m2 &= ~(1 << 16);
2099 m2 |= (1 << 31);
2100 writel(m2, port_mmio + PHY_MODE2);
2101
2102 udelay(200);
2103
2104 m2 = readl(port_mmio + PHY_MODE2);
2105 m2 &= ~((1 << 16) | (1 << 31));
2106 writel(m2, port_mmio + PHY_MODE2);
2107
2108 udelay(200);
2109 }
2110
2111 /* who knows what this magic does */
2112 tmp = readl(port_mmio + PHY_MODE3);
2113 tmp &= ~0x7F800000;
2114 tmp |= 0x2A800000;
2115 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2116
2117 if (fix_phy_mode4) {
47c2b677 2118 u32 m4;
bca1c4eb
JG
2119
2120 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2121
2122 if (hp_flags & MV_HP_ERRATA_60X1B2)
2123 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
2124
2125 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2126
2127 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2128
2129 if (hp_flags & MV_HP_ERRATA_60X1B2)
2130 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
2131 }
2132
2133 /* Revert values of pre-emphasis and signal amps to the saved ones */
2134 m2 = readl(port_mmio + PHY_MODE2);
2135
2136 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2137 m2 |= hpriv->signal[port].amps;
2138 m2 |= hpriv->signal[port].pre;
47c2b677 2139 m2 &= ~(1 << 16);
bca1c4eb 2140
e4e7b892
JG
2141 /* according to mvSata 3.6.1, some IIE values are fixed */
2142 if (IS_GEN_IIE(hpriv)) {
2143 m2 &= ~0xC30FF01F;
2144 m2 |= 0x0000900F;
2145 }
2146
bca1c4eb
JG
2147 writel(m2, port_mmio + PHY_MODE2);
2148}
2149
c9d39130
JG
2150static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2151 unsigned int port_no)
2152{
2153 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2154
2155 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2156
ee9ccdf7 2157 if (IS_GEN_II(hpriv)) {
c9d39130 2158 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2159 ifctl |= (1 << 7); /* enable gen2i speed */
2160 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
c9d39130
JG
2161 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2162 }
2163
2164 udelay(25); /* allow reset propagation */
2165
2166 /* Spec never mentions clearing the bit. Marvell's driver does
2167 * clear the bit, however.
2168 */
2169 writelfl(0, port_mmio + EDMA_CMD_OFS);
2170
2171 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2172
ee9ccdf7 2173 if (IS_GEN_I(hpriv))
c9d39130
JG
2174 mdelay(1);
2175}
2176
05b308e1 2177/**
bdd4ddde 2178 * mv_phy_reset - Perform eDMA reset followed by COMRESET
05b308e1
BR
2179 * @ap: ATA channel to manipulate
2180 *
 2181 * Part of this is taken from __sata_phy_reset; unlike that
 2182 * routine, this one msleep()s while polling the link.
 2183 *
 2184 * LOCKING:
 2185 * Inherited from caller.  Because it sleeps, this routine must be
 2186 * called from a context that may sleep (not interrupt level).
31961943 2187 */
bdd4ddde
JG
2188static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2189 unsigned long deadline)
20f733e7 2190{
095fec88 2191 struct mv_port_priv *pp = ap->private_data;
cca3974e 2192 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2193 void __iomem *port_mmio = mv_ap_base(ap);
22374677
JG
2194 int retry = 5;
2195 u32 sstatus;
20f733e7
BR
2196
2197 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2198
da3dbb17
TH
2199#ifdef DEBUG
2200 {
2201 u32 sstatus, serror, scontrol;
2202
2203 mv_scr_read(ap, SCR_STATUS, &sstatus);
2204 mv_scr_read(ap, SCR_ERROR, &serror);
2205 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2206 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2d79ab8f 2207 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
da3dbb17
TH
2208 }
2209#endif
20f733e7 2210
22374677
JG
2211 /* Issue COMRESET via SControl */
2212comreset_retry:
936fd732 2213 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
bdd4ddde 2214 msleep(1);
22374677 2215
936fd732 2216 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
bdd4ddde 2217 msleep(20);
22374677 2218
31961943 2219 do {
936fd732 2220 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
62f1d0e6 2221 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2222 break;
22374677 2223
bdd4ddde 2224 msleep(1);
c5d3e45a 2225 } while (time_before(jiffies, deadline));
20f733e7 2226
22374677 2227 /* work around errata */
ee9ccdf7 2228 if (IS_GEN_II(hpriv) &&
22374677
JG
2229 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2230 (retry-- > 0))
2231 goto comreset_retry;
095fec88 2232
da3dbb17
TH
2233#ifdef DEBUG
2234 {
2235 u32 sstatus, serror, scontrol;
2236
2237 mv_scr_read(ap, SCR_STATUS, &sstatus);
2238 mv_scr_read(ap, SCR_ERROR, &serror);
2239 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2240 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2241 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2242 }
2243#endif
31961943 2244
936fd732 2245 if (ata_link_offline(&ap->link)) {
bdd4ddde 2246 *class = ATA_DEV_NONE;
20f733e7
BR
2247 return;
2248 }
2249
22374677
JG
 2250 /* even after SStatus reflects that the device is ready,
 2251 * it seems to take a while for the link to be fully
 2252 * established (and thus for Status to no longer read 0x80/0x7F),
 2253 * so we poll a bit for that here.
2254 */
2255 retry = 20;
2256 while (1) {
2257 u8 drv_stat = ata_check_status(ap);
2258 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2259 break;
bdd4ddde 2260 msleep(500);
22374677
JG
2261 if (retry-- <= 0)
2262 break;
bdd4ddde
JG
2263 if (time_after(jiffies, deadline))
2264 break;
22374677
JG
2265 }
2266
bdd4ddde
JG
2267 /* FIXME: if we passed the deadline, the following
2268 * code probably produces an invalid result
2269 */
20f733e7 2270
bdd4ddde 2271 /* finally, read device signature from TF registers */
3f19859e 2272 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
095fec88
JG
2273
2274 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2275
bdd4ddde 2276 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2277
bca1c4eb 2278 VPRINTK("EXIT\n");
20f733e7
BR
2279}
2280
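/*
 * [sketch, not driver code]  The SControl handshake mv_phy_reset()
 * issues above, in miniature.  Assumed field meanings: 0x301 sets
 * DET=1 (start interface init) with IPM=3 (partial/slumber
 * disabled), 0x300 releases DET, and SStatus is then polled until
 * its low bits read 0x3 (PHY up) or 0x0 (no device), exactly the
 * "(sstatus & 0x3)" test in the loop above.  The fake PHY below
 * reports "up" after a fixed number of polls.
 */
#include <stdio.h>

static unsigned int sk_sstatus(void)
{
    static int polls;
    return (++polls >= 3) ? 0x123 : 0x1;    /* DET: in-progress -> up */
}

int main(void)
{
    unsigned int sstatus = 0;
    int tries;

    /* driver: sata_scr_write_flush(link, SCR_CONTROL, 0x301); msleep(1);  */
    /* driver: sata_scr_write_flush(link, SCR_CONTROL, 0x300); msleep(20); */
    for (tries = 0; tries < 100; tries++) {
        sstatus = sk_sstatus();
        if ((sstatus & 0x3) == 3 || (sstatus & 0x3) == 0)
            break;
    }
    printf("SStatus 0x%x: %s\n", sstatus,
           ((sstatus & 0x3) == 3) ? "link up" : "no device");
    return 0;
}
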
cc0680a5 2281static int mv_prereset(struct ata_link *link, unsigned long deadline)
22374677 2282{
cc0680a5 2283 struct ata_port *ap = link->ap;
bdd4ddde 2284 struct mv_port_priv *pp = ap->private_data;
cc0680a5 2285 struct ata_eh_context *ehc = &link->eh_context;
bdd4ddde 2286 int rc;
0ea9e179 2287
bdd4ddde
JG
2288 rc = mv_stop_dma(ap);
2289 if (rc)
2290 ehc->i.action |= ATA_EH_HARDRESET;
2291
2292 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2293 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2294 ehc->i.action |= ATA_EH_HARDRESET;
2295 }
2296
2297 /* if we're about to do hardreset, nothing more to do */
2298 if (ehc->i.action & ATA_EH_HARDRESET)
2299 return 0;
2300
cc0680a5 2301 if (ata_link_online(link))
bdd4ddde
JG
2302 rc = ata_wait_ready(ap, deadline);
2303 else
2304 rc = -ENODEV;
2305
2306 return rc;
22374677
JG
2307}
2308
cc0680a5 2309static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2310 unsigned long deadline)
31961943 2311{
cc0680a5 2312 struct ata_port *ap = link->ap;
bdd4ddde 2313 struct mv_host_priv *hpriv = ap->host->private_data;
0d5ff566 2314 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
31961943 2315
bdd4ddde 2316 mv_stop_dma(ap);
31961943 2317
bdd4ddde 2318 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2319
bdd4ddde
JG
2320 mv_phy_reset(ap, class, deadline);
2321
2322 return 0;
2323}
2324
cc0680a5 2325static void mv_postreset(struct ata_link *link, unsigned int *classes)
bdd4ddde 2326{
cc0680a5 2327 struct ata_port *ap = link->ap;
bdd4ddde
JG
2328 u32 serr;
2329
2330 /* print link status */
cc0680a5 2331 sata_print_link_status(link);
31961943 2332
bdd4ddde 2333 /* clear SError */
cc0680a5
TH
2334 sata_scr_read(link, SCR_ERROR, &serr);
2335 sata_scr_write_flush(link, SCR_ERROR, serr);
bdd4ddde
JG
2336
2337 /* bail out if no device is present */
2338 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2339 DPRINTK("EXIT, no device\n");
2340 return;
9b358e30 2341 }
bdd4ddde
JG
2342
2343 /* set up device control */
2344 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2345}
2346
2347static void mv_error_handler(struct ata_port *ap)
2348{
2349 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2350 mv_hardreset, mv_postreset);
2351}
2352
2353static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2354{
2355 mv_stop_dma(qc->ap);
2356}
2357
2358static void mv_eh_freeze(struct ata_port *ap)
2359{
2360 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2361 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2362 u32 tmp, mask;
2363 unsigned int shift;
2364
2365 /* FIXME: handle coalescing completion events properly */
2366
2367 shift = ap->port_no * 2;
2368 if (hc > 0)
2369 shift++;
2370
2371 mask = 0x3 << shift;
2372
2373 /* disable assertion of portN err, done events */
2374 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2375 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2376}
2377
2378static void mv_eh_thaw(struct ata_port *ap)
2379{
2380 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2381 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2382 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2383 void __iomem *port_mmio = mv_ap_base(ap);
2384 u32 tmp, mask, hc_irq_cause;
2385 unsigned int shift, hc_port_no = ap->port_no;
2386
2387 /* FIXME: handle coalescing completion events properly */
2388
2389 shift = ap->port_no * 2;
2390 if (hc > 0) {
2391 shift++;
2392 hc_port_no -= 4;
2393 }
2394
2395 mask = 0x3 << shift;
2396
2397 /* clear EDMA errors on this port */
2398 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2399
2400 /* clear pending irq events */
2401 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2402 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2403 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2404 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2405
2406 /* enable assertion of portN err, done events */
2407 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2408 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
31961943
BR
2409}
2410
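/*
 * [sketch, not driver code]  mv_eh_thaw()'s index gymnastics in one
 * place: a global port number maps to hc = port/4 and an in-HC
 * index; the HC cause word clears that port's CRPB-done (bit N) and
 * device-interrupt (bit N+8) bits; and the main mask re-enables the
 * two-bit err/done pair at the same shift mv_eh_freeze() computes.
 */
#include <assert.h>
#include <stdio.h>

struct sk_thaw {
    unsigned int hc;                /* which host controller */
    unsigned int hc_cause_clear;    /* bits to clear in HC cause reg */
    unsigned int main_mask_set;     /* bits to set in main IRQ mask */
};

static struct sk_thaw sk_thaw_bits(unsigned int port_no)
{
    unsigned int hc = (port_no > 3) ? 1 : 0;
    unsigned int hc_port_no = port_no - (hc ? 4 : 0);
    unsigned int shift = port_no * 2 + (hc ? 1 : 0);
    struct sk_thaw t = {
        .hc = hc,
        .hc_cause_clear = (1u << hc_port_no) | (1u << (hc_port_no + 8)),
        .main_mask_set  = 0x3u << shift,
    };
    return t;
}

int main(void)
{
    struct sk_thaw t = sk_thaw_bits(5);     /* port 5 = HC1, index 1 */

    assert(t.hc == 1);
    assert(t.hc_cause_clear == ((1u << 1) | (1u << 9)));
    assert(t.main_mask_set == (0x3u << 11));
    printf("port 5: clear 0x%x in HC cause, set 0x%x in main mask\n",
           t.hc_cause_clear, t.main_mask_set);
    return 0;
}
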
05b308e1
BR
2411/**
2412 * mv_port_init - Perform some early initialization on a single port.
2413 * @port: libata data structure storing shadow register addresses
2414 * @port_mmio: base address of the port
2415 *
2416 * Initialize shadow register mmio addresses, clear outstanding
2417 * interrupts on the port, and unmask interrupts for the future
2418 * start of the port.
2419 *
2420 * LOCKING:
2421 * Inherited from caller.
2422 */
31961943 2423static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2424{
0d5ff566 2425 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2426 unsigned serr_ofs;
2427
8b260248 2428 /* PIO related setup
31961943
BR
2429 */
2430 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2431 port->error_addr =
31961943
BR
2432 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2433 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2434 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2435 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2436 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2437 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2438 port->status_addr =
31961943
BR
2439 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2440 /* special case: control/altstatus doesn't have ATA_REG_ address */
2441 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2442
2443 /* unused: */
8d9db2d2 2444 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2445
31961943
BR
2446 /* Clear any currently outstanding port interrupt conditions */
2447 serr_ofs = mv_scr_offset(SCR_ERROR);
2448 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2449 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2450
646a4da5
ML
2451 /* unmask all non-transient EDMA error interrupts */
2452 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2453
8b260248 2454 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2455 readl(port_mmio + EDMA_CFG_OFS),
2456 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2457 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2458}
2459
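/*
 * [sketch, not driver code]  mv_port_init()'s shadow-register layout:
 * the chip exposes each taskfile register in its own u32 slot from
 * SHD_BLK_OFS, so slot address = block base + 4 * register index.
 * The indices are libata's ATA_REG_* values (DATA=0 through
 * STATUS=7); control/altstatus lives at a separate SHD_CTL_AST_OFS
 * rather than in this array, as the "special case" comment notes.
 */
#include <stdio.h>

enum {  /* libata shadow-register indices */
    SK_REG_DATA = 0, SK_REG_ERR, SK_REG_NSECT, SK_REG_LBAL,
    SK_REG_LBAM, SK_REG_LBAH, SK_REG_DEVICE, SK_REG_STATUS,
};

int main(void)
{
    static const char * const name[] = {
        "data", "err/feature", "nsect", "lbal",
        "lbam", "lbah", "device", "status/command",
    };

    for (int reg = SK_REG_DATA; reg <= SK_REG_STATUS; reg++)
        printf("%-14s at shd_base + 0x%02zx\n",
               name[reg], reg * sizeof(unsigned int));
    return 0;
}
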
4447d351 2460static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2461{
4447d351
TH
2462 struct pci_dev *pdev = to_pci_dev(host->dev);
2463 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2464 u32 hp_flags = hpriv->hp_flags;
2465
5796d1c4 2466 switch (board_idx) {
47c2b677
JG
2467 case chip_5080:
2468 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2469 hp_flags |= MV_HP_GEN_I;
47c2b677 2470
44c10138 2471 switch (pdev->revision) {
47c2b677
JG
2472 case 0x1:
2473 hp_flags |= MV_HP_ERRATA_50XXB0;
2474 break;
2475 case 0x3:
2476 hp_flags |= MV_HP_ERRATA_50XXB2;
2477 break;
2478 default:
2479 dev_printk(KERN_WARNING, &pdev->dev,
2480 "Applying 50XXB2 workarounds to unknown rev\n");
2481 hp_flags |= MV_HP_ERRATA_50XXB2;
2482 break;
2483 }
2484 break;
2485
bca1c4eb
JG
2486 case chip_504x:
2487 case chip_508x:
47c2b677 2488 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2489 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2490
44c10138 2491 switch (pdev->revision) {
47c2b677
JG
2492 case 0x0:
2493 hp_flags |= MV_HP_ERRATA_50XXB0;
2494 break;
2495 case 0x3:
2496 hp_flags |= MV_HP_ERRATA_50XXB2;
2497 break;
2498 default:
2499 dev_printk(KERN_WARNING, &pdev->dev,
2500 "Applying B2 workarounds to unknown rev\n");
2501 hp_flags |= MV_HP_ERRATA_50XXB2;
2502 break;
bca1c4eb
JG
2503 }
2504 break;
2505
2506 case chip_604x:
2507 case chip_608x:
47c2b677 2508 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2509 hp_flags |= MV_HP_GEN_II;
47c2b677 2510
44c10138 2511 switch (pdev->revision) {
47c2b677
JG
2512 case 0x7:
2513 hp_flags |= MV_HP_ERRATA_60X1B2;
2514 break;
2515 case 0x9:
2516 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2517 break;
2518 default:
2519 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2520 "Applying B2 workarounds to unknown rev\n");
2521 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2522 break;
2523 }
2524 break;
2525
e4e7b892 2526 case chip_7042:
02a121da 2527 hp_flags |= MV_HP_PCIE;
306b30f7
ML
2528 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2529 (pdev->device == 0x2300 || pdev->device == 0x2310))
2530 {
4e520033
ML
2531 /*
2532 * Highpoint RocketRAID PCIe 23xx series cards:
2533 *
2534 * Unconfigured drives are treated as "Legacy"
2535 * by the BIOS, and it overwrites sector 8 with
2536 * a "Lgcy" metadata block prior to Linux boot.
2537 *
2538 * Configured drives (RAID or JBOD) leave sector 8
2539 * alone, but instead overwrite a high numbered
2540 * sector for the RAID metadata. This sector can
2541 * be determined exactly, by truncating the physical
2542 * drive capacity to a nice even GB value.
2543 *
2544 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2545 *
2546 * Warn the user, lest they think we're just buggy.
2547 */
2548 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2549 " BIOS CORRUPTS DATA on all attached drives,"
2550 " regardless of if/how they are configured."
2551 " BEWARE!\n");
2552 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2553 " use sectors 8-9 on \"Legacy\" drives,"
2554 " and avoid the final two gigabytes on"
2555 " all RocketRAID BIOS initialized drives.\n");
306b30f7 2556 } /* fall through: the 7042 shares the 6042 setup below */
e4e7b892
JG
2557 case chip_6042:
2558 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2559 hp_flags |= MV_HP_GEN_IIE;
2560
44c10138 2561 switch (pdev->revision) {
e4e7b892
JG
2562 case 0x0:
2563 hp_flags |= MV_HP_ERRATA_XX42A0;
2564 break;
2565 case 0x1:
2566 hp_flags |= MV_HP_ERRATA_60X1C0;
2567 break;
2568 default:
2569 dev_printk(KERN_WARNING, &pdev->dev,
2570 "Applying 60X1C0 workarounds to unknown rev\n");
2571 hp_flags |= MV_HP_ERRATA_60X1C0;
2572 break;
2573 }
2574 break;
2575
bca1c4eb 2576 default:
5796d1c4
JG
2577 dev_printk(KERN_ERR, &pdev->dev,
2578 "BUG: invalid board index %u\n", board_idx);
bca1c4eb
JG
2579 return 1;
2580 }
2581
2582 hpriv->hp_flags = hp_flags;
02a121da
ML
2583 if (hp_flags & MV_HP_PCIE) {
2584 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2585 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2586 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2587 } else {
2588 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2589 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2590 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2591 }
bca1c4eb
JG
2592
2593 return 0;
2594}
2595
05b308e1 2596/**
47c2b677 2597 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
2598 * @host: ATA host to initialize
2599 * @board_idx: controller index
05b308e1
BR
2600 *
2601 * If possible, do an early global reset of the host. Then do
2602 * our port init and clear/unmask all/relevant host interrupts.
2603 *
2604 * LOCKING:
2605 * Inherited from caller.
2606 */
4447d351 2607static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
2608{
2609 int rc = 0, n_hc, port, hc;
4447d351
TH
2610 struct pci_dev *pdev = to_pci_dev(host->dev);
2611 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2612 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb 2613
47c2b677
JG
2614 /* global interrupt mask */
2615 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2616
4447d351 2617 rc = mv_chip_id(host, board_idx);
bca1c4eb
JG
2618 if (rc)
2619 goto done;
2620
4447d351 2621 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2622
4447d351 2623 for (port = 0; port < host->n_ports; port++)
47c2b677 2624 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2625
c9d39130 2626 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2627 if (rc)
20f733e7 2628 goto done;
20f733e7 2629
522479fb
JG
2630 hpriv->ops->reset_flash(hpriv, mmio);
2631 hpriv->ops->reset_bus(pdev, mmio);
47c2b677 2632 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2633
4447d351 2634 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2635 if (IS_GEN_II(hpriv)) {
c9d39130
JG
2636 void __iomem *port_mmio = mv_port_base(mmio, port);
2637
2a47ce06 2638 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2639 ifctl |= (1 << 7); /* enable gen2i speed */
2640 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2a47ce06
JG
2641 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2642 }
2643
c9d39130 2644 hpriv->ops->phy_errata(hpriv, mmio, port);
2a47ce06
JG
2645 }
2646
4447d351 2647 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2648 struct ata_port *ap = host->ports[port];
2a47ce06 2649 void __iomem *port_mmio = mv_port_base(mmio, port);
cbcdd875
TH
2650 unsigned int offset = port_mmio - mmio;
2651
2652 mv_port_init(&ap->ioaddr, port_mmio);
2653
2654 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2655 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
20f733e7
BR
2656 }
2657
2658 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
2659 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2660
2661 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2662 "(before clear)=0x%08x\n", hc,
2663 readl(hc_mmio + HC_CFG_OFS),
2664 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2665
2666 /* Clear any currently outstanding hc interrupt conditions */
2667 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2668 }
2669
31961943 2670 /* Clear any currently outstanding host interrupt conditions */
02a121da 2671 writelfl(0, mmio + hpriv->irq_cause_ofs);
31961943
BR
2672
2673 /* and unmask interrupt generation for host regs */
02a121da 2674 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
fb621e2f 2675
ee9ccdf7 2676 if (IS_GEN_I(hpriv))
fb621e2f
JG
2677 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2678 else
2679 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
20f733e7
BR
2680
2681 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
8b260248 2682 "PCI int cause/mask=0x%08x/0x%08x\n",
20f733e7
BR
2683 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2684 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
02a121da
ML
2685 readl(mmio + hpriv->irq_cause_ofs),
2686 readl(mmio + hpriv->irq_mask_ofs));
bca1c4eb 2687
31961943 2688done:
20f733e7
BR
2689 return rc;
2690}
2691
05b308e1
BR
2692/**
2693 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2694 * @host: ATA host to print info about
05b308e1
BR
2695 *
2696 * FIXME: complete this.
2697 *
2698 * LOCKING:
2699 * Inherited from caller.
2700 */
4447d351 2701static void mv_print_info(struct ata_host *host)
31961943 2702{
4447d351
TH
2703 struct pci_dev *pdev = to_pci_dev(host->dev);
2704 struct mv_host_priv *hpriv = host->private_data;
44c10138 2705 u8 scc;
c1e4fe71 2706 const char *scc_s, *gen;
31961943
BR
2707
2708 /* Use this to determine the HW stepping of the chip so we know
2709 * what errata to workaround
2710 */
31961943
BR
2711 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2712 if (scc == 0)
2713 scc_s = "SCSI";
2714 else if (scc == 0x01)
2715 scc_s = "RAID";
2716 else
c1e4fe71
JG
2717 scc_s = "?";
2718
2719 if (IS_GEN_I(hpriv))
2720 gen = "I";
2721 else if (IS_GEN_II(hpriv))
2722 gen = "II";
2723 else if (IS_GEN_IIE(hpriv))
2724 gen = "IIE";
2725 else
2726 gen = "?";
31961943 2727
a9524a76 2728 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
2729 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2730 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
2731 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2732}
2733
05b308e1
BR
2734/**
2735 * mv_init_one - handle a positive probe of a Marvell host
2736 * @pdev: PCI device found
2737 * @ent: PCI device ID entry for the matched host
2738 *
2739 * LOCKING:
2740 * Inherited from caller.
2741 */
20f733e7
BR
2742static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2743{
2dcb407e 2744 static int printed_version;
20f733e7 2745 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
2746 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2747 struct ata_host *host;
2748 struct mv_host_priv *hpriv;
2749 int n_ports, rc;
20f733e7 2750
a9524a76
JG
2751 if (!printed_version++)
2752 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 2753
4447d351
TH
2754 /* allocate host */
2755 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2756
2757 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2758 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2759 if (!host || !hpriv)
2760 return -ENOMEM;
2761 host->private_data = hpriv;
2762
2763 /* acquire resources */
24dc5f33
TH
2764 rc = pcim_enable_device(pdev);
2765 if (rc)
20f733e7 2766 return rc;
20f733e7 2767
0d5ff566
TH
2768 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2769 if (rc == -EBUSY)
24dc5f33 2770 pcim_pin_device(pdev);
0d5ff566 2771 if (rc)
24dc5f33 2772 return rc;
4447d351 2773 host->iomap = pcim_iomap_table(pdev);
20f733e7 2774
d88184fb
JG
2775 rc = pci_go_64(pdev);
2776 if (rc)
2777 return rc;
2778
20f733e7 2779 /* initialize adapter */
4447d351 2780 rc = mv_init_host(host, board_idx);
24dc5f33
TH
2781 if (rc)
2782 return rc;
20f733e7 2783
31961943 2784 /* Enable interrupts */
6a59dcf8 2785 if (msi && pci_enable_msi(pdev))
31961943 2786 pci_intx(pdev, 1);
20f733e7 2787
31961943 2788 mv_dump_pci_cfg(pdev, 0x68);
4447d351 2789 mv_print_info(host);
20f733e7 2790
4447d351 2791 pci_set_master(pdev);
ea8b4db9 2792 pci_try_set_mwi(pdev);
4447d351 2793 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 2794 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7
BR
2795}
2796
2797static int __init mv_init(void)
2798{
b7887196 2799 return pci_register_driver(&mv_pci_driver);
20f733e7
BR
2800}
2801
2802static void __exit mv_exit(void)
2803{
2804 pci_unregister_driver(&mv_pci_driver);
2805}
2806
2807MODULE_AUTHOR("Brett Russ");
2808MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2809MODULE_LICENSE("GPL");
2810MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2811MODULE_VERSION(DRV_VERSION);
2812
ddef9bb3
JG
2813module_param(msi, int, 0444);
2814MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2815
20f733e7
BR
2816module_init(mv_init);
2817module_exit(mv_exit);