/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

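/*
 * Reader's note (derived from the constants above, for orientation): in the
 * main IRQ cause register, bits 0-8 carry HC0's per-port error/done
 * indications (HC0_IRQ_PEND), HC1's ports start at bit 9 (HC_SHIFT), and
 * PCI_ERR, the coalescing, GPIO, self and TWSI causes sit above them.
 */
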
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_prereset(struct ata_link *link, unsigned long deadline);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_postreset(struct ata_link *link, unsigned int *classes);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.prereset		= mv_prereset,
	.hardreset		= mv_hardreset,
	.postreset		= mv_postreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= ata_std_qc_defer,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
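
/*
 * Worked example (derived purely from the constants above, for
 * illustration): on an 8-port chip, port 5 belongs to HC1
 * (5 >> MV_PORT_HC_SHIFT == 1) as hard port 1 (5 & MV_PORT_MASK == 1),
 * so mv_port_base() returns base + 0x20000 + 0x10000 + 0x2000 + 0x2000,
 * i.e. base + 0x34000.
 */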

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

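/*
 * Note on the pointer math above (illustrative): the 1KB request ring keeps
 * its base in the upper bits of EDMA_REQ_Q_IN_PTR (EDMA_REQ_Q_BASE_LO_MASK)
 * and the in-pointer in bits 9:0.  With 32-byte CRQBs and
 * EDMA_REQ_Q_PTR_SHIFT == 5, a req_idx of 3 encodes byte offset 96 into
 * the ring.
 */
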
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command being started
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

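/*
 * For reference (illustrative): with SATA_STATUS_OFS at 0x300, SCR_STATUS
 * maps to 0x300 and SCR_ERROR/SCR_CONTROL to the dwords that follow it,
 * while SCR_ACTIVE lives apart at SATA_ACTIVE_OFS (0x350).
 */
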
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}

static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

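/*
 * Illustrative example of the configuration above: on a Gen-IIE chip with
 * NCQ requested, the value written is EDMA_CFG_Q_DEPTH plus bits 17/18/22/23
 * plus EDMA_CFG_NCQ, i.e. 0x00c6003f.
 */
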
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

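/*
 * Illustrative example of the splitting above: a 12KB segment starting at
 * DMA address 0x1f000 crosses a 64KB boundary, so it is emitted as two
 * ePRDs, 4KB at 0x1f000 and 8KB at 0x20000.  This worst-case doubling is
 * why the scsi_host_templates advertise only MV_MAX_SG_CT / 2 entries.
 */
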
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

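/*
 * For reference: bits 7:0 of the packed halfword carry the register data,
 * the shadow-register address sits at CRQB_CMD_ADDR_SHIFT (bit 8),
 * CRQB_CMD_CS is always set, and CRQB_CMD_LAST flags the final ATA word
 * in the CRQB.
 */
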
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

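/*
 * In other words (reader's note): software owns the request-queue producer
 * index (req_idx) and the response-queue consumer index (resp_idx), while
 * the EDMA engine owns the opposite ends.  Writing the IN pointer above is
 * what tells the engine that a new CRQB is ready to be fetched.
 */
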
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command, if any (may be NULL)
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
1725	 * Read the read-only register to determine if any host
1726	 * controllers have pending interrupts. If so, call the lower level
1727	 * routine to handle. Also check for PCI errors, which are only
1728 * reported here.
1729 *
8b260248 1730 * LOCKING:
cca3974e 1731 * This routine holds the host lock while processing pending
05b308e1
BR
1732 * interrupts.
1733 */
7d12e780 1734static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1735{
cca3974e 1736 struct ata_host *host = dev_instance;
f351b2d6 1737 struct mv_host_priv *hpriv = host->private_data;
20f733e7 1738 unsigned int hc, handled = 0, n_hcs;
f351b2d6 1739 void __iomem *mmio = hpriv->base;
646a4da5 1740 u32 irq_stat, irq_mask;
20f733e7 1741
646a4da5 1742 spin_lock(&host->lock);
f351b2d6
SB
1743
1744 irq_stat = readl(hpriv->main_cause_reg_addr);
1745 irq_mask = readl(hpriv->main_mask_reg_addr);
20f733e7
BR
1746
1747 /* check the cases where we either have nothing pending or have read
1748 * a bogus register value which can indicate HW removal or PCI fault
1749 */
646a4da5
ML
1750 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1751 goto out_unlock;
20f733e7 1752
cca3974e 1753 n_hcs = mv_get_hc_count(host->ports[0]->flags);
20f733e7 1754
7bb3c529 1755 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
bdd4ddde
JG
1756 mv_pci_error(host, mmio);
1757 handled = 1;
1758 goto out_unlock; /* skip all other HC irq handling */
1759 }
1760
20f733e7
BR
1761 for (hc = 0; hc < n_hcs; hc++) {
1762 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1763 if (relevant) {
cca3974e 1764 mv_host_intr(host, relevant, hc);
bdd4ddde 1765 handled = 1;
20f733e7
BR
1766 }
1767 }
615ab953 1768
bdd4ddde 1769out_unlock:
cca3974e 1770 spin_unlock(&host->lock);
20f733e7
BR
1771
1772 return IRQ_RETVAL(handled);
1773}
1774
c9d39130
JG
1775static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1776{
1777 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1778 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1779
1780 return hc_mmio + ofs;
1781}
1782
1783static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1784{
1785 unsigned int ofs;
1786
1787 switch (sc_reg_in) {
1788 case SCR_STATUS:
1789 case SCR_ERROR:
1790 case SCR_CONTROL:
1791 ofs = sc_reg_in * sizeof(u32);
1792 break;
1793 default:
1794 ofs = 0xffffffffU;
1795 break;
1796 }
1797 return ofs;
1798}
1799
da3dbb17 1800static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1801{
f351b2d6
SB
1802 struct mv_host_priv *hpriv = ap->host->private_data;
1803 void __iomem *mmio = hpriv->base;
0d5ff566 1804 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1805 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1806
da3dbb17
TH
1807 if (ofs != 0xffffffffU) {
1808 *val = readl(addr + ofs);
1809 return 0;
1810 } else
1811 return -EINVAL;
c9d39130
JG
1812}
1813
da3dbb17 1814static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1815{
f351b2d6
SB
1816 struct mv_host_priv *hpriv = ap->host->private_data;
1817 void __iomem *mmio = hpriv->base;
0d5ff566 1818 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1819 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1820
da3dbb17 1821 if (ofs != 0xffffffffU) {
0d5ff566 1822 writelfl(val, addr + ofs);
da3dbb17
TH
1823 return 0;
1824 } else
1825 return -EINVAL;
c9d39130
JG
1826}
1827
7bb3c529 1828static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
522479fb 1829{
7bb3c529 1830 struct pci_dev *pdev = to_pci_dev(host->dev);
522479fb
JG
1831 int early_5080;
1832
44c10138 1833 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
1834
1835 if (!early_5080) {
1836 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1837 tmp |= (1 << 0);
1838 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1839 }
1840
7bb3c529 1841 mv_reset_pci_bus(host, mmio);
522479fb
JG
1842}
1843
1844static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1845{
1846 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1847}
1848
47c2b677 1849static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1850 void __iomem *mmio)
1851{
c9d39130
JG
1852 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1853 u32 tmp;
1854
1855 tmp = readl(phy_mmio + MV5_PHY_MODE);
1856
1857 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1858 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1859}
1860
47c2b677 1861static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1862{
522479fb
JG
1863 u32 tmp;
1864
1865 writel(0, mmio + MV_GPIO_PORT_CTL);
1866
1867 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1868
1869 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1870 tmp |= ~(1 << 0);
1871 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
1872}
1873
2a47ce06
JG
1874static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1875 unsigned int port)
bca1c4eb 1876{
c9d39130
JG
1877 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1878 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1879 u32 tmp;
1880 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1881
1882 if (fix_apm_sq) {
1883 tmp = readl(phy_mmio + MV5_LT_MODE);
1884 tmp |= (1 << 19);
1885 writel(tmp, phy_mmio + MV5_LT_MODE);
1886
1887 tmp = readl(phy_mmio + MV5_PHY_CTL);
1888 tmp &= ~0x3;
1889 tmp |= 0x1;
1890 writel(tmp, phy_mmio + MV5_PHY_CTL);
1891 }
1892
1893 tmp = readl(phy_mmio + MV5_PHY_MODE);
1894 tmp &= ~mask;
1895 tmp |= hpriv->signal[port].pre;
1896 tmp |= hpriv->signal[port].amps;
1897 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
1898}
1899
c9d39130
JG
1900
1901#undef ZERO
1902#define ZERO(reg) writel(0, port_mmio + (reg))
1903static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1904 unsigned int port)
1905{
1906 void __iomem *port_mmio = mv_port_base(mmio, port);
1907
1908 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1909
1910 mv_channel_reset(hpriv, mmio, port);
1911
1912 ZERO(0x028); /* command */
1913 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1914 ZERO(0x004); /* timer */
1915 ZERO(0x008); /* irq err cause */
1916 ZERO(0x00c); /* irq err mask */
1917 ZERO(0x010); /* rq bah */
1918 ZERO(0x014); /* rq inp */
1919 ZERO(0x018); /* rq outp */
1920 ZERO(0x01c); /* respq bah */
1921 ZERO(0x024); /* respq outp */
1922 ZERO(0x020); /* respq inp */
1923 ZERO(0x02c); /* test control */
1924 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1925}
1926#undef ZERO
1927
1928#define ZERO(reg) writel(0, hc_mmio + (reg))
1929static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1930 unsigned int hc)
47c2b677 1931{
c9d39130
JG
1932 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1933 u32 tmp;
1934
1935 ZERO(0x00c);
1936 ZERO(0x010);
1937 ZERO(0x014);
1938 ZERO(0x018);
1939
1940 tmp = readl(hc_mmio + 0x20);
1941 tmp &= 0x1c1c1c1c;
1942 tmp |= 0x03030303;
1943 writel(tmp, hc_mmio + 0x20);
1944}
1945#undef ZERO
1946
1947static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1948 unsigned int n_hc)
1949{
1950 unsigned int hc, port;
1951
1952 for (hc = 0; hc < n_hc; hc++) {
1953 for (port = 0; port < MV_PORTS_PER_HC; port++)
1954 mv5_reset_hc_port(hpriv, mmio,
1955 (hc * MV_PORTS_PER_HC) + port);
1956
1957 mv5_reset_one_hc(hpriv, mmio, hc);
1958 }
1959
1960 return 0;
47c2b677
JG
1961}
1962
101ffae2
JG
1963#undef ZERO
1964#define ZERO(reg) writel(0, mmio + (reg))
7bb3c529 1965static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
101ffae2 1966{
02a121da 1967 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
1968 u32 tmp;
1969
1970 tmp = readl(mmio + MV_PCI_MODE);
1971 tmp &= 0xff00ffff;
1972 writel(tmp, mmio + MV_PCI_MODE);
1973
1974 ZERO(MV_PCI_DISC_TIMER);
1975 ZERO(MV_PCI_MSI_TRIGGER);
1976 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1977 ZERO(HC_MAIN_IRQ_MASK_OFS);
1978 ZERO(MV_PCI_SERR_MASK);
02a121da
ML
1979 ZERO(hpriv->irq_cause_ofs);
1980 ZERO(hpriv->irq_mask_ofs);
101ffae2
JG
1981 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1982 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1983 ZERO(MV_PCI_ERR_ATTRIBUTE);
1984 ZERO(MV_PCI_ERR_COMMAND);
1985}
1986#undef ZERO
1987
1988static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1989{
1990 u32 tmp;
1991
1992 mv5_reset_flash(hpriv, mmio);
1993
1994 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1995 tmp &= 0x3;
1996 tmp |= (1 << 5) | (1 << 6);
1997 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1998}
1999
2000/**
2001 * mv6_reset_hc - Perform the 6xxx global soft reset
2002 * @mmio: base address of the HBA
2003 *
2004 * This routine only applies to 6xxx parts.
2005 *
2006 * LOCKING:
2007 * Inherited from caller.
2008 */
c9d39130
JG
2009static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2010 unsigned int n_hc)
101ffae2
JG
2011{
2012 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2013 int i, rc = 0;
2014 u32 t;
2015
2016 /* Following procedure defined in PCI "main command and status
2017 * register" table.
2018 */
2019 t = readl(reg);
2020 writel(t | STOP_PCI_MASTER, reg);
2021
2022 for (i = 0; i < 1000; i++) {
2023 udelay(1);
2024 t = readl(reg);
2dcb407e 2025 if (PCI_MASTER_EMPTY & t)
101ffae2 2026 break;
101ffae2
JG
2027 }
2028 if (!(PCI_MASTER_EMPTY & t)) {
2029 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2030 rc = 1;
2031 goto done;
2032 }
2033
2034 /* set reset */
2035 i = 5;
2036 do {
2037 writel(t | GLOB_SFT_RST, reg);
2038 t = readl(reg);
2039 udelay(1);
2040 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2041
2042 if (!(GLOB_SFT_RST & t)) {
2043 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2044 rc = 1;
2045 goto done;
2046 }
2047
2048 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2049 i = 5;
2050 do {
2051 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2052 t = readl(reg);
2053 udelay(1);
2054 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2055
2056 if (GLOB_SFT_RST & t) {
2057 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2058 rc = 1;
2059 }
2060done:
2061 return rc;
2062}
2063
47c2b677 2064static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2065 void __iomem *mmio)
2066{
2067 void __iomem *port_mmio;
2068 u32 tmp;
2069
ba3fe8fb
JG
2070 tmp = readl(mmio + MV_RESET_CFG);
2071 if ((tmp & (1 << 0)) == 0) {
47c2b677 2072 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2073 hpriv->signal[idx].pre = 0x1 << 5;
2074 return;
2075 }
2076
2077 port_mmio = mv_port_base(mmio, idx);
2078 tmp = readl(port_mmio + PHY_MODE2);
2079
2080 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2081 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2082}
2083
47c2b677 2084static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2085{
47c2b677 2086 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
ba3fe8fb
JG
2087}
2088
c9d39130 2089static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2090 unsigned int port)
bca1c4eb 2091{
c9d39130
JG
2092 void __iomem *port_mmio = mv_port_base(mmio, port);
2093
bca1c4eb 2094 u32 hp_flags = hpriv->hp_flags;
47c2b677
JG
2095 int fix_phy_mode2 =
2096 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2097 int fix_phy_mode4 =
47c2b677
JG
2098 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2099 u32 m2, tmp;
2100
2101 if (fix_phy_mode2) {
2102 m2 = readl(port_mmio + PHY_MODE2);
2103 m2 &= ~(1 << 16);
2104 m2 |= (1 << 31);
2105 writel(m2, port_mmio + PHY_MODE2);
2106
2107 udelay(200);
2108
2109 m2 = readl(port_mmio + PHY_MODE2);
2110 m2 &= ~((1 << 16) | (1 << 31));
2111 writel(m2, port_mmio + PHY_MODE2);
2112
2113 udelay(200);
2114 }
2115
2116 /* who knows what this magic does */
2117 tmp = readl(port_mmio + PHY_MODE3);
2118 tmp &= ~0x7F800000;
2119 tmp |= 0x2A800000;
2120 writel(tmp, port_mmio + PHY_MODE3);
bca1c4eb
JG
2121
2122 if (fix_phy_mode4) {
47c2b677 2123 u32 m4;
bca1c4eb
JG
2124
2125 m4 = readl(port_mmio + PHY_MODE4);
47c2b677
JG
2126
2127 if (hp_flags & MV_HP_ERRATA_60X1B2)
2128 tmp = readl(port_mmio + 0x310);
bca1c4eb
JG
2129
2130 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2131
2132 writel(m4, port_mmio + PHY_MODE4);
47c2b677
JG
2133
2134 if (hp_flags & MV_HP_ERRATA_60X1B2)
2135 writel(tmp, port_mmio + 0x310);
bca1c4eb
JG
2136 }
2137
2138 /* Revert values of pre-emphasis and signal amps to the saved ones */
2139 m2 = readl(port_mmio + PHY_MODE2);
2140
2141 m2 &= ~MV_M2_PREAMP_MASK;
2a47ce06
JG
2142 m2 |= hpriv->signal[port].amps;
2143 m2 |= hpriv->signal[port].pre;
47c2b677 2144 m2 &= ~(1 << 16);
bca1c4eb 2145
e4e7b892
JG
2146 /* according to mvSata 3.6.1, some IIE values are fixed */
2147 if (IS_GEN_IIE(hpriv)) {
2148 m2 &= ~0xC30FF01F;
2149 m2 |= 0x0000900F;
2150 }
2151
bca1c4eb
JG
2152 writel(m2, port_mmio + PHY_MODE2);
2153}
2154
f351b2d6
SB
2155/* TODO: use the generic LED interface to configure the SATA Presence */
2156	 /* & Activity LEDs on the board */
2157static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2158 void __iomem *mmio)
2159{
2160 return;
2161}
2162
2163static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2164 void __iomem *mmio)
2165{
2166 void __iomem *port_mmio;
2167 u32 tmp;
2168
2169 port_mmio = mv_port_base(mmio, idx);
2170 tmp = readl(port_mmio + PHY_MODE2);
2171
2172 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2173 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2174}
2175
2176#undef ZERO
2177#define ZERO(reg) writel(0, port_mmio + (reg))
2178static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2179 void __iomem *mmio, unsigned int port)
2180{
2181 void __iomem *port_mmio = mv_port_base(mmio, port);
2182
2183 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2184
2185 mv_channel_reset(hpriv, mmio, port);
2186
2187 ZERO(0x028); /* command */
2188 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2189 ZERO(0x004); /* timer */
2190 ZERO(0x008); /* irq err cause */
2191 ZERO(0x00c); /* irq err mask */
2192 ZERO(0x010); /* rq bah */
2193 ZERO(0x014); /* rq inp */
2194 ZERO(0x018); /* rq outp */
2195 ZERO(0x01c); /* respq bah */
2196 ZERO(0x024); /* respq outp */
2197 ZERO(0x020); /* respq inp */
2198 ZERO(0x02c); /* test control */
2199 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2200}
2201
2202#undef ZERO
2203
2204#define ZERO(reg) writel(0, hc_mmio + (reg))
2205static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2206 void __iomem *mmio)
2207{
2208 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2209
2210 ZERO(0x00c);
2211 ZERO(0x010);
2212 ZERO(0x014);
2213
2214}
2215
2216#undef ZERO
2217
2218static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2219 void __iomem *mmio, unsigned int n_hc)
2220{
2221 unsigned int port;
2222
2223 for (port = 0; port < hpriv->n_ports; port++)
2224 mv_soc_reset_hc_port(hpriv, mmio, port);
2225
2226 mv_soc_reset_one_hc(hpriv, mmio);
2227
2228 return 0;
2229}
2230
2231static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2232 void __iomem *mmio)
2233{
2234 return;
2235}
2236
2237static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2238{
2239 return;
2240}
2241
c9d39130
JG
2242static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2243 unsigned int port_no)
2244{
2245 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2246
2247 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2248
ee9ccdf7 2249 if (IS_GEN_II(hpriv)) {
c9d39130 2250 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2251 ifctl |= (1 << 7); /* enable gen2i speed */
2252 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
c9d39130
JG
2253 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2254 }
2255
2256 udelay(25); /* allow reset propagation */
2257
2258 /* Spec never mentions clearing the bit. Marvell's driver does
2259 * clear the bit, however.
2260 */
2261 writelfl(0, port_mmio + EDMA_CMD_OFS);
2262
2263 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2264
ee9ccdf7 2265 if (IS_GEN_I(hpriv))
c9d39130
JG
2266 mdelay(1);
2267}
2268
05b308e1 2269/**
bdd4ddde 2270 * mv_phy_reset - Perform eDMA reset followed by COMRESET
05b308e1
BR
2271 * @ap: ATA channel to manipulate
2272 *
2273 * Part of this is taken from __sata_phy_reset and modified to
2274 * not sleep since this routine gets called from interrupt level.
2275 *
2276 * LOCKING:
2277	 * Inherited from caller. This is coded to be safe to call at
2278 * interrupt level, i.e. it does not sleep.
31961943 2279 */
bdd4ddde
JG
2280static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2281 unsigned long deadline)
20f733e7 2282{
095fec88 2283 struct mv_port_priv *pp = ap->private_data;
cca3974e 2284 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2285 void __iomem *port_mmio = mv_ap_base(ap);
22374677
JG
2286 int retry = 5;
2287 u32 sstatus;
20f733e7
BR
2288
2289 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2290
da3dbb17
TH
2291#ifdef DEBUG
2292 {
2293 u32 sstatus, serror, scontrol;
2294
2295 mv_scr_read(ap, SCR_STATUS, &sstatus);
2296 mv_scr_read(ap, SCR_ERROR, &serror);
2297 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2298 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2d79ab8f 2299 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
da3dbb17
TH
2300 }
2301#endif
20f733e7 2302
22374677
JG
2303 /* Issue COMRESET via SControl */
2304comreset_retry:
936fd732 2305 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
bdd4ddde 2306 msleep(1);
22374677 2307
936fd732 2308 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
bdd4ddde 2309 msleep(20);
22374677 2310
31961943 2311 do {
936fd732 2312 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
62f1d0e6 2313 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2314 break;
22374677 2315
bdd4ddde 2316 msleep(1);
c5d3e45a 2317 } while (time_before(jiffies, deadline));
20f733e7 2318
22374677 2319 /* work around errata */
ee9ccdf7 2320 if (IS_GEN_II(hpriv) &&
22374677
JG
2321 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2322 (retry-- > 0))
2323 goto comreset_retry;
095fec88 2324
da3dbb17
TH
2325#ifdef DEBUG
2326 {
2327 u32 sstatus, serror, scontrol;
2328
2329 mv_scr_read(ap, SCR_STATUS, &sstatus);
2330 mv_scr_read(ap, SCR_ERROR, &serror);
2331 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2332 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2333 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2334 }
2335#endif
31961943 2336
936fd732 2337 if (ata_link_offline(&ap->link)) {
bdd4ddde 2338 *class = ATA_DEV_NONE;
20f733e7
BR
2339 return;
2340 }
2341
22374677
JG
2342 /* even after SStatus reflects that device is ready,
2343 * it seems to take a while for link to be fully
2344 * established (and thus Status no longer 0x80/0x7F),
2345 * so we poll a bit for that, here.
2346 */
2347 retry = 20;
2348 while (1) {
2349 u8 drv_stat = ata_check_status(ap);
2350 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2351 break;
bdd4ddde 2352 msleep(500);
22374677
JG
2353 if (retry-- <= 0)
2354 break;
bdd4ddde
JG
2355 if (time_after(jiffies, deadline))
2356 break;
22374677
JG
2357 }
2358
bdd4ddde
JG
2359 /* FIXME: if we passed the deadline, the following
2360 * code probably produces an invalid result
2361 */
20f733e7 2362
bdd4ddde 2363 /* finally, read device signature from TF registers */
3f19859e 2364 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
095fec88
JG
2365
2366 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2367
bdd4ddde 2368 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2369
bca1c4eb 2370 VPRINTK("EXIT\n");
20f733e7
BR
2371}
2372
cc0680a5 2373static int mv_prereset(struct ata_link *link, unsigned long deadline)
22374677 2374{
cc0680a5 2375 struct ata_port *ap = link->ap;
bdd4ddde 2376 struct mv_port_priv *pp = ap->private_data;
0ea9e179 2377
cf480626 2378 mv_stop_dma(ap);
bdd4ddde 2379
cf480626 2380 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
bdd4ddde 2381 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
bdd4ddde 2382
cf480626 2383 return 0;
22374677
JG
2384}
2385
cc0680a5 2386static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2387 unsigned long deadline)
31961943 2388{
cc0680a5 2389 struct ata_port *ap = link->ap;
bdd4ddde 2390 struct mv_host_priv *hpriv = ap->host->private_data;
f351b2d6 2391 void __iomem *mmio = hpriv->base;
31961943 2392
bdd4ddde 2393 mv_stop_dma(ap);
31961943 2394
bdd4ddde 2395 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2396
bdd4ddde
JG
2397 mv_phy_reset(ap, class, deadline);
2398
2399 return 0;
2400}
2401
cc0680a5 2402static void mv_postreset(struct ata_link *link, unsigned int *classes)
bdd4ddde 2403{
cc0680a5 2404 struct ata_port *ap = link->ap;
bdd4ddde
JG
2405 u32 serr;
2406
2407 /* print link status */
cc0680a5 2408 sata_print_link_status(link);
31961943 2409
bdd4ddde 2410 /* clear SError */
cc0680a5
TH
2411 sata_scr_read(link, SCR_ERROR, &serr);
2412 sata_scr_write_flush(link, SCR_ERROR, serr);
bdd4ddde
JG
2413
2414 /* bail out if no device is present */
2415 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2416 DPRINTK("EXIT, no device\n");
2417 return;
9b358e30 2418 }
bdd4ddde
JG
2419
2420 /* set up device control */
2421 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2422}
2423
bdd4ddde
JG
2424static void mv_eh_freeze(struct ata_port *ap)
2425{
f351b2d6 2426 struct mv_host_priv *hpriv = ap->host->private_data;
bdd4ddde
JG
2427 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2428 u32 tmp, mask;
2429 unsigned int shift;
2430
2431 /* FIXME: handle coalescing completion events properly */
2432
2433 shift = ap->port_no * 2;
2434 if (hc > 0)
2435 shift++;
2436
2437 mask = 0x3 << shift;
2438
2439 /* disable assertion of portN err, done events */
f351b2d6
SB
2440 tmp = readl(hpriv->main_mask_reg_addr);
2441 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
bdd4ddde
JG
2442}
2443
2444static void mv_eh_thaw(struct ata_port *ap)
2445{
f351b2d6
SB
2446 struct mv_host_priv *hpriv = ap->host->private_data;
2447 void __iomem *mmio = hpriv->base;
bdd4ddde
JG
2448 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2449 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2450 void __iomem *port_mmio = mv_ap_base(ap);
2451 u32 tmp, mask, hc_irq_cause;
2452 unsigned int shift, hc_port_no = ap->port_no;
2453
2454 /* FIXME: handle coalescing completion events properly */
2455
2456 shift = ap->port_no * 2;
2457 if (hc > 0) {
2458 shift++;
2459 hc_port_no -= 4;
2460 }
2461
2462 mask = 0x3 << shift;
2463
2464 /* clear EDMA errors on this port */
2465 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2466
2467 /* clear pending irq events */
2468 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2469 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2470 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2471 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2472
2473 /* enable assertion of portN err, done events */
f351b2d6
SB
2474 tmp = readl(hpriv->main_mask_reg_addr);
2475 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
31961943
BR
2476}
2477
05b308e1
BR
2478/**
2479 * mv_port_init - Perform some early initialization on a single port.
2480 * @port: libata data structure storing shadow register addresses
2481 * @port_mmio: base address of the port
2482 *
2483 * Initialize shadow register mmio addresses, clear outstanding
2484 * interrupts on the port, and unmask interrupts for the future
2485 * start of the port.
2486 *
2487 * LOCKING:
2488 * Inherited from caller.
2489 */
31961943 2490static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2491{
0d5ff566 2492 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
31961943
BR
2493 unsigned serr_ofs;
2494
8b260248 2495 /* PIO related setup
31961943
BR
2496 */
2497 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2498 port->error_addr =
31961943
BR
2499 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2500 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2501 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2502 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2503 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2504 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2505 port->status_addr =
31961943
BR
2506 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2507 /* special case: control/altstatus doesn't have ATA_REG_ address */
2508 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2509
2510 /* unused: */
8d9db2d2 2511 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2512
31961943
BR
2513 /* Clear any currently outstanding port interrupt conditions */
2514 serr_ofs = mv_scr_offset(SCR_ERROR);
2515 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2516 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2517
646a4da5
ML
2518 /* unmask all non-transient EDMA error interrupts */
2519 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2520
8b260248 2521 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
31961943
BR
2522 readl(port_mmio + EDMA_CFG_OFS),
2523 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2524 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
20f733e7
BR
2525}
2526
4447d351 2527static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2528{
4447d351
TH
2529 struct pci_dev *pdev = to_pci_dev(host->dev);
2530 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb
JG
2531 u32 hp_flags = hpriv->hp_flags;
2532
5796d1c4 2533 switch (board_idx) {
47c2b677
JG
2534 case chip_5080:
2535 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2536 hp_flags |= MV_HP_GEN_I;
47c2b677 2537
44c10138 2538 switch (pdev->revision) {
47c2b677
JG
2539 case 0x1:
2540 hp_flags |= MV_HP_ERRATA_50XXB0;
2541 break;
2542 case 0x3:
2543 hp_flags |= MV_HP_ERRATA_50XXB2;
2544 break;
2545 default:
2546 dev_printk(KERN_WARNING, &pdev->dev,
2547 "Applying 50XXB2 workarounds to unknown rev\n");
2548 hp_flags |= MV_HP_ERRATA_50XXB2;
2549 break;
2550 }
2551 break;
2552
bca1c4eb
JG
2553 case chip_504x:
2554 case chip_508x:
47c2b677 2555 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2556 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2557
44c10138 2558 switch (pdev->revision) {
47c2b677
JG
2559 case 0x0:
2560 hp_flags |= MV_HP_ERRATA_50XXB0;
2561 break;
2562 case 0x3:
2563 hp_flags |= MV_HP_ERRATA_50XXB2;
2564 break;
2565 default:
2566 dev_printk(KERN_WARNING, &pdev->dev,
2567 "Applying B2 workarounds to unknown rev\n");
2568 hp_flags |= MV_HP_ERRATA_50XXB2;
2569 break;
bca1c4eb
JG
2570 }
2571 break;
2572
2573 case chip_604x:
2574 case chip_608x:
47c2b677 2575 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2576 hp_flags |= MV_HP_GEN_II;
47c2b677 2577
44c10138 2578 switch (pdev->revision) {
47c2b677
JG
2579 case 0x7:
2580 hp_flags |= MV_HP_ERRATA_60X1B2;
2581 break;
2582 case 0x9:
2583 hp_flags |= MV_HP_ERRATA_60X1C0;
bca1c4eb
JG
2584 break;
2585 default:
2586 dev_printk(KERN_WARNING, &pdev->dev,
47c2b677
JG
2587 "Applying B2 workarounds to unknown rev\n");
2588 hp_flags |= MV_HP_ERRATA_60X1B2;
bca1c4eb
JG
2589 break;
2590 }
2591 break;
2592
e4e7b892 2593 case chip_7042:
02a121da 2594 hp_flags |= MV_HP_PCIE;
306b30f7
ML
2595 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2596 (pdev->device == 0x2300 || pdev->device == 0x2310))
2597 {
4e520033
ML
2598 /*
2599 * Highpoint RocketRAID PCIe 23xx series cards:
2600 *
2601 * Unconfigured drives are treated as "Legacy"
2602 * by the BIOS, and it overwrites sector 8 with
2603 * a "Lgcy" metadata block prior to Linux boot.
2604 *
2605 * Configured drives (RAID or JBOD) leave sector 8
2606 * alone, but instead overwrite a high numbered
2607 * sector for the RAID metadata. This sector can
2608 * be determined exactly, by truncating the physical
2609 * drive capacity to a nice even GB value.
2610 *
2611 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2612 *
2613 * Warn the user, lest they think we're just buggy.
2614 */
2615 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2616 " BIOS CORRUPTS DATA on all attached drives,"
2617 " regardless of if/how they are configured."
2618 " BEWARE!\n");
2619 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2620 " use sectors 8-9 on \"Legacy\" drives,"
2621 " and avoid the final two gigabytes on"
2622 " all RocketRAID BIOS initialized drives.\n");
306b30f7 2623 }
e4e7b892
JG
2624 case chip_6042:
2625 hpriv->ops = &mv6xxx_ops;
e4e7b892
JG
2626 hp_flags |= MV_HP_GEN_IIE;
2627
44c10138 2628 switch (pdev->revision) {
e4e7b892
JG
2629 case 0x0:
2630 hp_flags |= MV_HP_ERRATA_XX42A0;
2631 break;
2632 case 0x1:
2633 hp_flags |= MV_HP_ERRATA_60X1C0;
2634 break;
2635 default:
2636 dev_printk(KERN_WARNING, &pdev->dev,
2637 "Applying 60X1C0 workarounds to unknown rev\n");
2638 hp_flags |= MV_HP_ERRATA_60X1C0;
2639 break;
2640 }
2641 break;
f351b2d6
SB
2642 case chip_soc:
2643 hpriv->ops = &mv_soc_ops;
2644 hp_flags |= MV_HP_ERRATA_60X1C0;
2645 break;
e4e7b892 2646
bca1c4eb 2647 default:
f351b2d6 2648 dev_printk(KERN_ERR, host->dev,
5796d1c4 2649 "BUG: invalid board index %u\n", board_idx);
bca1c4eb
JG
2650 return 1;
2651 }
2652
2653 hpriv->hp_flags = hp_flags;
02a121da
ML
2654 if (hp_flags & MV_HP_PCIE) {
2655 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2656 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2657 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2658 } else {
2659 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2660 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2661 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2662 }
bca1c4eb
JG
2663
2664 return 0;
2665}
2666
05b308e1 2667/**
47c2b677 2668 * mv_init_host - Perform some early initialization of the host.
4447d351
TH
2669 * @host: ATA host to initialize
2670 * @board_idx: controller index
05b308e1
BR
2671 *
2672 * If possible, do an early global reset of the host. Then do
2673 * our port init and clear/unmask all/relevant host interrupts.
2674 *
2675 * LOCKING:
2676 * Inherited from caller.
2677 */
4447d351 2678static int mv_init_host(struct ata_host *host, unsigned int board_idx)
20f733e7
BR
2679{
2680 int rc = 0, n_hc, port, hc;
4447d351 2681 struct mv_host_priv *hpriv = host->private_data;
f351b2d6 2682 void __iomem *mmio = hpriv->base;
47c2b677 2683
4447d351 2684 rc = mv_chip_id(host, board_idx);
bca1c4eb 2685 if (rc)
f351b2d6
SB
2686 goto done;
2687
2688 if (HAS_PCI(host)) {
2689 hpriv->main_cause_reg_addr = hpriv->base +
2690 HC_MAIN_IRQ_CAUSE_OFS;
2691 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2692 } else {
2693 hpriv->main_cause_reg_addr = hpriv->base +
2694 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2695 hpriv->main_mask_reg_addr = hpriv->base +
2696 HC_SOC_MAIN_IRQ_MASK_OFS;
2697 }
2698 /* global interrupt mask */
2699 writel(0, hpriv->main_mask_reg_addr);
bca1c4eb 2700
4447d351 2701 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2702
4447d351 2703 for (port = 0; port < host->n_ports; port++)
47c2b677 2704 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2705
c9d39130 2706 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2707 if (rc)
20f733e7 2708 goto done;
20f733e7 2709
522479fb 2710 hpriv->ops->reset_flash(hpriv, mmio);
7bb3c529 2711 hpriv->ops->reset_bus(host, mmio);
47c2b677 2712 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2713
4447d351 2714 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2715 if (IS_GEN_II(hpriv)) {
c9d39130
JG
2716 void __iomem *port_mmio = mv_port_base(mmio, port);
2717
2a47ce06 2718 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
eb46d684
ML
2719 ifctl |= (1 << 7); /* enable gen2i speed */
2720 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2a47ce06
JG
2721 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2722 }
2723
c9d39130 2724 hpriv->ops->phy_errata(hpriv, mmio, port);
2a47ce06
JG
2725 }
2726
4447d351 2727 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2728 struct ata_port *ap = host->ports[port];
2a47ce06 2729 void __iomem *port_mmio = mv_port_base(mmio, port);
cbcdd875
TH
2730
2731 mv_port_init(&ap->ioaddr, port_mmio);
2732
7bb3c529 2733#ifdef CONFIG_PCI
f351b2d6
SB
2734 if (HAS_PCI(host)) {
2735 unsigned int offset = port_mmio - mmio;
2736 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2737 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2738 }
7bb3c529 2739#endif
20f733e7
BR
2740 }
2741
2742 for (hc = 0; hc < n_hc; hc++) {
31961943
BR
2743 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2744
2745 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2746 "(before clear)=0x%08x\n", hc,
2747 readl(hc_mmio + HC_CFG_OFS),
2748 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2749
2750 /* Clear any currently outstanding hc interrupt conditions */
2751 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
2752 }
2753
f351b2d6
SB
2754 if (HAS_PCI(host)) {
2755 /* Clear any currently outstanding host interrupt conditions */
2756 writelfl(0, mmio + hpriv->irq_cause_ofs);
31961943 2757
f351b2d6
SB
2758 /* and unmask interrupt generation for host regs */
2759 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2760 if (IS_GEN_I(hpriv))
2761 writelfl(~HC_MAIN_MASKED_IRQS_5,
2762 hpriv->main_mask_reg_addr);
2763 else
2764 writelfl(~HC_MAIN_MASKED_IRQS,
2765 hpriv->main_mask_reg_addr);
2766
2767 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2768 "PCI int cause/mask=0x%08x/0x%08x\n",
2769 readl(hpriv->main_cause_reg_addr),
2770 readl(hpriv->main_mask_reg_addr),
2771 readl(mmio + hpriv->irq_cause_ofs),
2772 readl(mmio + hpriv->irq_mask_ofs));
2773 } else {
2774 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2775 hpriv->main_mask_reg_addr);
2776 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2777 readl(hpriv->main_cause_reg_addr),
2778 readl(hpriv->main_mask_reg_addr));
2779 }
2780done:
2781 return rc;
2782}
fb621e2f 2783
fbf14e2f
BB
2784static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2785{
2786 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2787 MV_CRQB_Q_SZ, 0);
2788 if (!hpriv->crqb_pool)
2789 return -ENOMEM;
2790
2791 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2792 MV_CRPB_Q_SZ, 0);
2793 if (!hpriv->crpb_pool)
2794 return -ENOMEM;
2795
2796 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2797 MV_SG_TBL_SZ, 0);
2798 if (!hpriv->sg_tbl_pool)
2799 return -ENOMEM;
2800
2801 return 0;
2802}
2803
f351b2d6
SB
2804/**
2805 * mv_platform_probe - handle a positive probe of an soc Marvell
2806 * host
2807 * @pdev: platform device found
2808 *
2809 * LOCKING:
2810 * Inherited from caller.
2811 */
2812static int mv_platform_probe(struct platform_device *pdev)
2813{
2814 static int printed_version;
2815 const struct mv_sata_platform_data *mv_platform_data;
2816 const struct ata_port_info *ppi[] =
2817 { &mv_port_info[chip_soc], NULL };
2818 struct ata_host *host;
2819 struct mv_host_priv *hpriv;
2820 struct resource *res;
2821 int n_ports, rc;
20f733e7 2822
f351b2d6
SB
2823 if (!printed_version++)
2824 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
bca1c4eb 2825
f351b2d6
SB
2826 /*
2827 * Simple resource validation ..
2828 */
2829 if (unlikely(pdev->num_resources != 2)) {
2830 dev_err(&pdev->dev, "invalid number of resources\n");
2831 return -EINVAL;
2832 }
2833
2834 /*
2835 * Get the register base first
2836 */
2837 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2838 if (res == NULL)
2839 return -EINVAL;
2840
2841 /* allocate host */
2842 mv_platform_data = pdev->dev.platform_data;
2843 n_ports = mv_platform_data->n_ports;
2844
2845 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2846 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2847
2848 if (!host || !hpriv)
2849 return -ENOMEM;
2850 host->private_data = hpriv;
2851 hpriv->n_ports = n_ports;
2852
2853 host->iomap = NULL;
f1cb0ea1
SB
2854 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2855 res->end - res->start + 1);
f351b2d6
SB
2856 hpriv->base -= MV_SATAHC0_REG_BASE;
2857
fbf14e2f
BB
2858 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2859 if (rc)
2860 return rc;
2861
f351b2d6
SB
2862 /* initialize adapter */
2863 rc = mv_init_host(host, chip_soc);
2864 if (rc)
2865 return rc;
2866
2867 dev_printk(KERN_INFO, &pdev->dev,
2868 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2869 host->n_ports);
2870
2871 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2872 IRQF_SHARED, &mv6_sht);
2873}
2874
2875/*
2876 *
2877 * mv_platform_remove - unplug a platform interface
2878 * @pdev: platform device
2879 *
2880 * A platform bus SATA device has been unplugged. Perform the needed
2881 * cleanup. Also called on module unload for any active devices.
2882 */
2883static int __devexit mv_platform_remove(struct platform_device *pdev)
2884{
2885 struct device *dev = &pdev->dev;
2886 struct ata_host *host = dev_get_drvdata(dev);
f351b2d6
SB
2887
2888 ata_host_detach(host);
f351b2d6 2889 return 0;
20f733e7
BR
2890}
2891
f351b2d6
SB
2892static struct platform_driver mv_platform_driver = {
2893 .probe = mv_platform_probe,
2894 .remove = __devexit_p(mv_platform_remove),
2895 .driver = {
2896 .name = DRV_NAME,
2897 .owner = THIS_MODULE,
2898 },
2899};
2900
2901
7bb3c529 2902#ifdef CONFIG_PCI
f351b2d6
SB
2903static int mv_pci_init_one(struct pci_dev *pdev,
2904 const struct pci_device_id *ent);
2905
7bb3c529
SB
2906
2907static struct pci_driver mv_pci_driver = {
2908 .name = DRV_NAME,
2909 .id_table = mv_pci_tbl,
f351b2d6 2910 .probe = mv_pci_init_one,
7bb3c529
SB
2911 .remove = ata_pci_remove_one,
2912};
2913
2914/*
2915 * module options
2916 */
2917static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2918
2919
2920/* move to PCI layer or libata core? */
2921static int pci_go_64(struct pci_dev *pdev)
2922{
2923 int rc;
2924
2925 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2926 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2927 if (rc) {
2928 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2929 if (rc) {
2930 dev_printk(KERN_ERR, &pdev->dev,
2931 "64-bit DMA enable failed\n");
2932 return rc;
2933 }
2934 }
2935 } else {
2936 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2937 if (rc) {
2938 dev_printk(KERN_ERR, &pdev->dev,
2939 "32-bit DMA enable failed\n");
2940 return rc;
2941 }
2942 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2943 if (rc) {
2944 dev_printk(KERN_ERR, &pdev->dev,
2945 "32-bit consistent DMA enable failed\n");
2946 return rc;
2947 }
2948 }
2949
2950 return rc;
2951}
2952
05b308e1
BR
2953/**
2954 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2955 * @host: ATA host to print info about
05b308e1
BR
2956 *
2957 * FIXME: complete this.
2958 *
2959 * LOCKING:
2960 * Inherited from caller.
2961 */
4447d351 2962static void mv_print_info(struct ata_host *host)
31961943 2963{
4447d351
TH
2964 struct pci_dev *pdev = to_pci_dev(host->dev);
2965 struct mv_host_priv *hpriv = host->private_data;
44c10138 2966 u8 scc;
c1e4fe71 2967 const char *scc_s, *gen;
31961943
BR
2968
2969 /* Use this to determine the HW stepping of the chip so we know
2970	 * what errata to work around
2971 */
31961943
BR
2972 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2973 if (scc == 0)
2974 scc_s = "SCSI";
2975 else if (scc == 0x01)
2976 scc_s = "RAID";
2977 else
c1e4fe71
JG
2978 scc_s = "?";
2979
2980 if (IS_GEN_I(hpriv))
2981 gen = "I";
2982 else if (IS_GEN_II(hpriv))
2983 gen = "II";
2984 else if (IS_GEN_IIE(hpriv))
2985 gen = "IIE";
2986 else
2987 gen = "?";
31961943 2988
a9524a76 2989 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
2990 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2991 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
2992 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2993}
2994
05b308e1 2995/**
f351b2d6 2996 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
05b308e1
BR
2997 * @pdev: PCI device found
2998 * @ent: PCI device ID entry for the matched host
2999 *
3000 * LOCKING:
3001 * Inherited from caller.
3002 */
f351b2d6
SB
3003static int mv_pci_init_one(struct pci_dev *pdev,
3004 const struct pci_device_id *ent)
20f733e7 3005{
2dcb407e 3006 static int printed_version;
20f733e7 3007 unsigned int board_idx = (unsigned int)ent->driver_data;
4447d351
TH
3008 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3009 struct ata_host *host;
3010 struct mv_host_priv *hpriv;
3011 int n_ports, rc;
20f733e7 3012
a9524a76
JG
3013 if (!printed_version++)
3014 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 3015
4447d351
TH
3016 /* allocate host */
3017 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3018
3019 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3020 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3021 if (!host || !hpriv)
3022 return -ENOMEM;
3023 host->private_data = hpriv;
f351b2d6 3024 hpriv->n_ports = n_ports;
4447d351
TH
3025
3026 /* acquire resources */
24dc5f33
TH
3027 rc = pcim_enable_device(pdev);
3028 if (rc)
20f733e7 3029 return rc;
20f733e7 3030
0d5ff566
TH
3031 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3032 if (rc == -EBUSY)
24dc5f33 3033 pcim_pin_device(pdev);
0d5ff566 3034 if (rc)
24dc5f33 3035 return rc;
4447d351 3036 host->iomap = pcim_iomap_table(pdev);
f351b2d6 3037 hpriv->base = host->iomap[MV_PRIMARY_BAR];
20f733e7 3038
d88184fb
JG
3039 rc = pci_go_64(pdev);
3040 if (rc)
3041 return rc;
3042
da2fa9ba
ML
3043 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3044 if (rc)
3045 return rc;
3046
20f733e7 3047 /* initialize adapter */
4447d351 3048 rc = mv_init_host(host, board_idx);
24dc5f33
TH
3049 if (rc)
3050 return rc;
20f733e7 3051
31961943 3052 /* Enable interrupts */
6a59dcf8 3053 if (msi && pci_enable_msi(pdev))
31961943 3054 pci_intx(pdev, 1);
20f733e7 3055
31961943 3056 mv_dump_pci_cfg(pdev, 0x68);
4447d351 3057 mv_print_info(host);
20f733e7 3058
4447d351 3059 pci_set_master(pdev);
ea8b4db9 3060 pci_try_set_mwi(pdev);
4447d351 3061 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 3062 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7 3063}
7bb3c529 3064#endif
20f733e7 3065
f351b2d6
SB
3066static int mv_platform_probe(struct platform_device *pdev);
3067static int __devexit mv_platform_remove(struct platform_device *pdev);
3068
20f733e7
BR
3069static int __init mv_init(void)
3070{
7bb3c529
SB
3071 int rc = -ENODEV;
3072#ifdef CONFIG_PCI
3073 rc = pci_register_driver(&mv_pci_driver);
f351b2d6
SB
3074 if (rc < 0)
3075 return rc;
3076#endif
3077 rc = platform_driver_register(&mv_platform_driver);
3078
3079#ifdef CONFIG_PCI
3080 if (rc < 0)
3081 pci_unregister_driver(&mv_pci_driver);
7bb3c529
SB
3082#endif
3083 return rc;
20f733e7
BR
3084}
3085
3086static void __exit mv_exit(void)
3087{
7bb3c529 3088#ifdef CONFIG_PCI
20f733e7 3089 pci_unregister_driver(&mv_pci_driver);
7bb3c529 3090#endif
f351b2d6 3091 platform_driver_unregister(&mv_platform_driver);
20f733e7
BR
3092}
3093
3094MODULE_AUTHOR("Brett Russ");
3095MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3096MODULE_LICENSE("GPL");
3097MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3098MODULE_VERSION(DRV_VERSION);
2e7e1214 3099MODULE_ALIAS("platform:sata_mv");
20f733e7 3100
7bb3c529 3101#ifdef CONFIG_PCI
ddef9bb3
JG
3102module_param(msi, int, 0444);
3103MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
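/*
 * Usage sketch, assuming sata_mv is built as a module: the "msi"
 * parameter above uses mode 0444, so it is read-only through sysfs
 * and must be supplied at load time, e.g.:
 *
 *     modprobe sata_mv msi=1
 *
 * With the default msi=0 the driver stays on legacy INTx interrupts.
 */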
7bb3c529 3104#endif
ddef9bb3 3105
20f733e7
BR
3106module_init(mv_init);
3107module_exit(mv_exit);