[deliverable/linux.git] / drivers / ata / sata_mv.c
CommitLineData
20f733e7
BR
1/*
2 * sata_mv.c - Marvell SATA support
3 *
8b260248 4 * Copyright 2005: EMC Corporation, all rights reserved.
e2b1be56 5 * Copyright 2005 Red Hat, Inc. All rights reserved.
20f733e7
BR
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
4a05e209
JG
24/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
1fd2e1c2
ML
32 2) Improve/fix IRQ and error handling sequences.
33
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35
36 4) Think about TCQ support here, and for libata in general
37 with controllers that suppport it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
4a05e209
JG
39
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41
42 6) Add port multiplier support (intermediate)
43
4a05e209
JG
44 8) Develop a low-power-consumption strategy, and implement it.
45
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
48 like that.
49
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is quite often not
53 worth the latency cost.
54
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
58
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
61
4a05e209
JG
62*/
63
64
20f733e7
BR
65#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
8d8b6004 72#include <linux/dmapool.h>
20f733e7 73#include <linux/dma-mapping.h>
a9524a76 74#include <linux/device.h>
f351b2d6
SB
75#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
20f733e7 77#include <scsi/scsi_host.h>
193515d5 78#include <scsi/scsi_cmnd.h>
6c08772e 79#include <scsi/scsi_device.h>
20f733e7 80#include <linux/libata.h>
20f733e7
BR
81
82#define DRV_NAME "sata_mv"
1fd2e1c2 83#define DRV_VERSION "1.20"
20f733e7
BR
84
85enum {
86 /* BAR's are enumerated in terms of pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
93
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
615ab953
ML
96 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101
20f733e7 102 MV_SATAHC0_REG_BASE = 0x20000,
522479fb 103 MV_FLASH_CTL = 0x1046c,
bca1c4eb
JG
104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
20f733e7
BR
106
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111
31961943
BR
112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
114
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
31961943
BR
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
da2fa9ba 121 MV_MAX_SG_CT = 256,
31961943 122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
31961943 123
20f733e7
BR
124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
31961943 127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
20f733e7
BR
128 MV_PORT_MASK = 3,
129
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
7bb3c529
SB
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
135
c5d3e45a 136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
bdd4ddde
JG
137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
47c2b677 139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
20f733e7 140
31961943
BR
141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
c5d3e45a
JG
143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
31961943
BR
145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
148
149 CRPB_FLAG_STATUS_SHIFT = 8,
c5d3e45a
JG
150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
31961943
BR
152
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
154
20f733e7
BR
155 /* PCI interface registers */
156
31961943
BR
157 PCI_COMMAND_OFS = 0xc00,
158
20f733e7
BR
159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
163
522479fb
JG
164 MV_PCI_MODE = 0xd00,
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
174
02a121da
ML
175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
20f733e7
BR
177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
178
02a121da
ML
179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
646a4da5 181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
02a121da 182
20f733e7
BR
183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
f351b2d6
SB
185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
20f733e7
BR
187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
191 PCI_ERR = (1 << 18),
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
fb621e2f
JG
194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
20f733e7
BR
196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
fb621e2f 201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
f351b2d6 202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
8b260248 203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
20f733e7
BR
204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 HC_MAIN_RSVD),
fb621e2f
JG
206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 HC_MAIN_RSVD_5),
f351b2d6 208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
20f733e7
BR
209
210 /* SATAHC registers */
211 HC_CFG_OFS = 0,
212
213 HC_IRQ_CAUSE_OFS = 0x14,
31961943 214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
20f733e7
BR
215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
217
218 /* Shadow block registers */
31961943
BR
219 SHD_BLK_OFS = 0x100,
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
20f733e7
BR
221
222 /* SATA registers */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
0c58912e 225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
47c2b677 226 PHY_MODE3 = 0x310,
bca1c4eb
JG
227 PHY_MODE4 = 0x314,
228 PHY_MODE2 = 0x330,
c9d39130
JG
229 MV5_PHY_MODE = 0x74,
230 MV5_LT_MODE = 0x30,
231 MV5_PHY_CTL = 0x0C,
bca1c4eb
JG
232 SATA_INTERFACE_CTL = 0x050,
233
234 MV_M2_PREAMP_MASK = 0x7e0,
20f733e7
BR
235
236 /* Port registers */
237 EDMA_CFG_OFS = 0,
0c58912e
ML
238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
20f733e7
BR
243
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
6c1153e0
JG
246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
c5d3e45a
JG
252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
6c1153e0 254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
c5d3e45a 255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
6c1153e0
JG
256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
646a4da5 260
6c1153e0 261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
646a4da5
ML
262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
266
6c1153e0 267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
646a4da5 268
6c1153e0 269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
646a4da5
ML
270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
275
6c1153e0 276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
646a4da5 277
6c1153e0 278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
c5d3e45a
JG
279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
646a4da5
ML
281
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
286
bdd4ddde
JG
287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_SERR |
292 EDMA_ERR_SELF_DIS |
6c1153e0 293 EDMA_ERR_CRQB_PAR |
bdd4ddde
JG
294 EDMA_ERR_CRPB_PAR |
295 EDMA_ERR_INTRL_PAR |
296 EDMA_ERR_IORDY |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_OVERRUN_5 |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
6c1153e0 308 EDMA_ERR_CRQB_PAR |
bdd4ddde
JG
309 EDMA_ERR_CRPB_PAR |
310 EDMA_ERR_INTRL_PAR |
311 EDMA_ERR_IORDY,
20f733e7 312
31961943
BR
313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
31961943
BR
315
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
318
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
31961943
BR
322 EDMA_RSP_Q_PTR_SHIFT = 3,
323
0ea9e179
JG
324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
20f733e7 328
c9d39130 329 EDMA_IORDY_TMOUT = 0x34,
bca1c4eb 330 EDMA_ARB_CFG = 0x38,
bca1c4eb 331
31961943
BR
332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
47c2b677
JG
334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
e4e7b892 338 MV_HP_ERRATA_XX42A0 = (1 << 5),
0ea9e179
JG
339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
02a121da 342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
20f733e7 343
31961943 344 /* Port private flags (pp_flags) */
0ea9e179 345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
72109168 346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
0ea9e179 347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
20f733e7
BR
348};
349
ee9ccdf7
JG
/* Chip-generation tests keyed off the MV_HP_* flags set at probe time */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
/* SoC-integrated parts carry MV_FLAG_SOC and have no PCI interface */
#define HAS_PCI(host)		(!((host)->ports[0]->flags & MV_FLAG_SOC))
bca1c4eb 354
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
369
522479fb
JG
/* Chip family index: used both as the driver_data value in mv_pci_tbl[]
 * and as the index into mv_port_info[] below.
 */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,		/* SoC-integrated variant, no PCI interface */
};
380
31961943
BR
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;	/* s/g (ePRD) table address, low 32 bits */
	__le32			sg_addr_hi;	/* s/g (ePRD) table address, high 32 bits */
	__le16			ctrl_flags;	/* CRQB_FLAG_READ / tag / CS fields */
	__le16			ata_cmd[11];	/* encoded ATA register writes */
};
20f733e7 388
e4e7b892 389struct mv_crqb_iie {
e1469874
ML
390 __le32 addr;
391 __le32 addr_hi;
392 __le32 flags;
393 __le32 len;
394 __le32 ata_cmd[4];
e4e7b892
JG
395};
396
31961943
BR
/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;		/* response tag/IO id (see CRPB_IOID_SHIFT_*) */
	__le16			flags;		/* device status at CRPB_FLAG_STATUS_SHIFT */
	__le32			tmstmp;		/* presumably a hardware timestamp — TODO confirm */
};
403
31961943
BR
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;		/* segment address, low 32 bits */
	__le32			flags_size;	/* length, plus EPRD_FLAG_END_OF_TBL on last entry */
	__le32			addr_hi;	/* segment address, high 32 bits */
	__le32			reserved;
};
20f733e7 411
31961943
BR
/* Per-port state, hung off ata_port->private_data */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* command request ring */
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;		/* command response ring */
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];	/* one s/g table per queue slot */
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;	/* software request-queue index */
	unsigned int		resp_idx;	/* software response-queue index */

	u32			pp_flags;	/* MV_PP_FLAG_* */
};
425
bca1c4eb
JG
/* Per-port PHY signal parameters, captured by the ->read_preamp hook */
struct mv_port_signal {
	u32			amps;		/* amplitude */
	u32			pre;		/* pre-emphasis */
};
430
02a121da
ML
/* Host-wide state, hung off ata_host->private_data */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* host private flags */
	struct mv_port_signal	signal[8];	/* one entry per possible port */
	const struct mv_hw_ops	*ops;		/* per-family hardware hooks */
	int			n_ports;
	void __iomem		*base;		/* controller register base */
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;	/* PCI vs PCIe parts use different offsets */
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
451
/* Hardware-family-specific hooks; concrete implementations live in
 * mv5xxx_ops, mv6xxx_ops and mv_soc_ops below.
 */
struct mv_hw_ops {
	/* apply chip-specific PHY errata workarounds to one port */
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	/* capture PHY signal (amp/pre) settings into hpriv->signal[idx] */
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	/* reset n_hc host controllers; returns 0 on success */
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
463
20f733e7 464static void mv_irq_clear(struct ata_port *ap);
da3dbb17
TH
465static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
466static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
467static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
468static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
31961943
BR
469static int mv_port_start(struct ata_port *ap);
470static void mv_port_stop(struct ata_port *ap);
471static void mv_qc_prep(struct ata_queued_cmd *qc);
e4e7b892 472static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
9a3d9eb0 473static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
bdd4ddde 474static void mv_error_handler(struct ata_port *ap);
bdd4ddde
JG
475static void mv_eh_freeze(struct ata_port *ap);
476static void mv_eh_thaw(struct ata_port *ap);
f273827e 477static void mv6_dev_config(struct ata_device *dev);
20f733e7 478
2a47ce06
JG
479static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
480 unsigned int port);
47c2b677
JG
481static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
482static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
483 void __iomem *mmio);
c9d39130
JG
484static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
485 unsigned int n_hc);
522479fb 486static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
7bb3c529 487static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
47c2b677 488
2a47ce06
JG
489static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
490 unsigned int port);
47c2b677
JG
491static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
492static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
493 void __iomem *mmio);
c9d39130
JG
494static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
495 unsigned int n_hc);
522479fb 496static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
f351b2d6
SB
497static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
498 void __iomem *mmio);
499static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
500 void __iomem *mmio);
501static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
502 void __iomem *mmio, unsigned int n_hc);
503static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
504 void __iomem *mmio);
505static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
7bb3c529 506static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
c9d39130
JG
507static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
508 unsigned int port_no);
72109168
ML
509static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
510 void __iomem *port_mmio, int want_ncq);
511static int __mv_stop_dma(struct ata_port *ap);
47c2b677 512
eb73d558
ML
513/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
514 * because we have to allow room for worst case splitting of
515 * PRDs for 64K boundaries in mv_fill_sg().
516 */
c5d3e45a
JG
/* SCSI host template for Gen-I (50xx) chips: no NCQ, so the default
 * queue depth and no change_queue_depth hook.
 */
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,	/* room for 64K-boundary PRD splits */
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
534
/* SCSI host template for Gen-II/IIE chips: NCQ-capable, so queue depth
 * is the hardware ring size minus one and is runtime-adjustable.
 */
static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,	/* room for 64K-boundary PRD splits */
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
553
/* libata hooks for Gen-I (50xx) chips: 5xxx-specific SCR access, no NCQ */
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
580
/* libata hooks for Gen-II (60xx) chips: adds dev_config and NCQ qc_defer */
static const struct ata_port_operations mv6_ops = {
	.dev_config		= mv6_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
609
/* libata hooks for Gen-IIE (6042/7042/SoC): IIE-format CRQB in qc_prep */
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
637
/* Per-chip-family port parameters, indexed by enum chip_type */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
692
/* PCI device match table; driver_data is the chip_type index into
 * mv_port_info[].
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
722
47c2b677
JG
723static const struct mv_hw_ops mv5xxx_ops = {
724 .phy_errata = mv5_phy_errata,
725 .enable_leds = mv5_enable_leds,
726 .read_preamp = mv5_read_preamp,
727 .reset_hc = mv5_reset_hc,
522479fb
JG
728 .reset_flash = mv5_reset_flash,
729 .reset_bus = mv5_reset_bus,
47c2b677
JG
730};
731
/* Hardware hooks for Gen-II/IIE (60xx/7042) families */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv6_enable_leds,
	.read_preamp	= mv6_read_preamp,
	.reset_hc	= mv6_reset_hc,
	.reset_flash	= mv6_reset_flash,
	.reset_bus	= mv_reset_pci_bus,
};
740
f351b2d6
SB
/* Hardware hooks for SoC-integrated controllers (shares Gen-II PHY errata) */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata	= mv6_phy_errata,
	.enable_leds	= mv_soc_enable_leds,
	.read_preamp	= mv_soc_read_preamp,
	.reset_hc	= mv_soc_reset_hc,
	.reset_flash	= mv_soc_reset_flash,
	.reset_bus	= mv_soc_reset_bus,
};
749
20f733e7
BR
750/*
751 * Functions
752 */
753
/* Write a register, then read it back so the (possibly posted) PCI
 * write is forced to reach the device before we proceed.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
759
20f733e7
BR
/* Register base of SATA host controller 'hc'; each HC occupies one
 * MV_SATAHC_REG_SZ window starting at MV_SATAHC0_REG_BASE.
 */
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
764
c9d39130
JG
/* Which host controller owns a given 0-7 port number */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
769
/* Port's index (0-3) within its own host controller */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
774
/* HC register base for the host controller that owns 'port' */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
780
20f733e7
BR
781static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
782{
c9d39130 783 return mv_hc_base_from_port(base, port) +
8b260248 784 MV_SATAHC_ARBTR_REG_SZ +
c9d39130 785 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
20f733e7
BR
786}
787
f351b2d6
SB
788static inline void __iomem *mv_host_base(struct ata_host *host)
789{
790 struct mv_host_priv *hpriv = host->private_data;
791 return hpriv->base;
792}
793
20f733e7
BR
794static inline void __iomem *mv_ap_base(struct ata_port *ap)
795{
f351b2d6 796 return mv_port_base(mv_host_base(ap->host), ap->port_no);
20f733e7
BR
797}
798
cca3974e 799static inline int mv_get_hc_count(unsigned long port_flags)
31961943 800{
cca3974e 801 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
31961943
BR
802}
803
/* Intentionally a no-op: the IRQ cause registers are cleared explicitly
 * where needed (e.g. mv_start_dma), so the generic per-IRQ hook has
 * nothing to do.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
807
c5d3e45a
JG
/**
 * mv_set_edma_ptrs - program the EDMA request/response queue registers
 * @port_mmio: port register base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding the queue DMA addresses and indices
 *
 * Loads the hardware base/in/out pointer registers for both the
 * request (CRQB) and response (CRPB) rings from the cached software
 * state.  Parts with errata XX42A0 additionally need the low 32
 * address bits written into the OUT (request) / IN (response)
 * pointer registers.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* CRQB ring must be 1KB-aligned (see MV_CRQB_Q_SZ) */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* CRPB ring must be 256B-aligned (see MV_CRPB_Q_SZ) */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
847
05b308e1
BR
848/**
849 * mv_start_dma - Enable eDMA engine
850 * @base: port base address
851 * @pp: port private data
852 *
beec7dbc
TH
853 * Verify the local cache of the eDMA state is accurate with a
854 * WARN_ON.
05b308e1
BR
855 *
856 * LOCKING:
857 * Inherited from caller.
858 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	/* If EDMA is already running but in the wrong NCQ mode, stop it
	 * so it can be reconfigured below.
	 */
	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any (only this
		 * port's DEV_IRQ/CRPB_DMA_DONE bits, leave the rest)
		 */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		/* configure EDMA for the requested mode (NCQ or not) */
		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		/* load queue base/in/out pointers, then enable the engine */
		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	/* sanity-check that the cached state matches the hardware */
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
900
/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Requests EDMA disable (the disable bit auto-clears), then polls
 *      up to ~100ms for EDMA_EN to drop.  Verifies the local cache of
 *      the eDMA state is accurate with a WARN_ON.
 *
 *      Returns 0 on success, -EIO if the engine refuses to stop.
 *
 *      LOCKING:
 *      Inherited from caller (host lock must be held).
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* cached "disabled" state must match the hardware */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
943
0ea9e179
JG
944static int mv_stop_dma(struct ata_port *ap)
945{
946 unsigned long flags;
947 int rc;
948
949 spin_lock_irqsave(&ap->host->lock, flags);
950 rc = __mv_stop_dma(ap);
951 spin_unlock_irqrestore(&ap->host->lock, flags);
952
953 return rc;
954}
955
#ifdef ATA_DEBUG
/* Debug helper: hex-dump `bytes` of MMIO space at `start`, 4 words per row. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		/* print up to 4 consecutive 32-bit words per line */
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
970
31961943
BR
/* Debug helper: hex-dump the first `bytes` of PCI config space for `pdev`. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			/* return value deliberately ignored for a debug dump */
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config space, chip-global registers, and the
 * HC/EDMA/SATA register blocks for one port (port >= 0) or for all
 * ports (port < 0).
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		/* dump just the HC/port the caller asked about */
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1031
1032static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1033{
1034 unsigned int ofs;
1035
1036 switch (sc_reg_in) {
1037 case SCR_STATUS:
1038 case SCR_CONTROL:
1039 case SCR_ERROR:
1040 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1041 break;
1042 case SCR_ACTIVE:
1043 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1044 break;
1045 default:
1046 ofs = 0xffffffffU;
1047 break;
1048 }
1049 return ofs;
1050}
1051
da3dbb17 1052static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
20f733e7
BR
1053{
1054 unsigned int ofs = mv_scr_offset(sc_reg_in);
1055
da3dbb17
TH
1056 if (ofs != 0xffffffffU) {
1057 *val = readl(mv_ap_base(ap) + ofs);
1058 return 0;
1059 } else
1060 return -EINVAL;
20f733e7
BR
1061}
1062
da3dbb17 1063static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
20f733e7
BR
1064{
1065 unsigned int ofs = mv_scr_offset(sc_reg_in);
1066
da3dbb17 1067 if (ofs != 0xffffffffU) {
20f733e7 1068 writelfl(val, mv_ap_base(ap) + ofs);
da3dbb17
TH
1069 return 0;
1070 } else
1071 return -EINVAL;
20f733e7
BR
1072}
1073
f273827e
ML
1074static void mv6_dev_config(struct ata_device *adev)
1075{
1076 /*
1077 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1078 * See mv_qc_prep() for more info.
1079 */
1080 if (adev->flags & ATA_DFLAG_NCQ)
1081 if (adev->max_sectors > ATA_MAX_SECTORS)
1082 adev->max_sectors = ATA_MAX_SECTORS;
1083}
1084
72109168
ML
/*
 * mv_edma_cfg - program the EDMA configuration register.
 * @pp: port private data (NCQ flag cached here)
 * @hpriv: host private data (chip generation)
 * @port_mmio: port base address
 * @want_ncq: nonzero to enable NCQ mode
 *
 * Builds a per-generation EDMA_CFG value, records the NCQ state in
 * pp->pp_flags, and writes the register with a read-back flush.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1114
da2fa9ba
ML
/*
 * mv_port_free_dma_mem - release all per-port DMA pool allocations.
 * @ap: ATA channel being torn down
 *
 * Frees the CRQB/CRPB rings and the sg tables; safe to call on a
 * partially-allocated port (NULL fields are skipped), so it doubles
 * as the error-unwind path for mv_port_start().
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			/* on GEN_I, tags 1..31 alias tag 0 — free it once */
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1143
05b308e1
BR
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.  On any allocation failure after the first ring,
 *      unwinds via mv_port_free_dma_mem().
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	/* devm-managed: freed automatically on device teardown */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* GEN_I: every tag aliases the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	/* program EDMA config (non-NCQ) and queue pointers under the lock */
	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1211
05b308e1
BR
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop
 *      (via mv_stop_dma); the frees run unlocked.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
1226
05b308e1
BR
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.  Each DMA segment
 *      is split so that no ePRD crosses a 64KB boundary (hardware
 *      limit: 16-bit length/offset fields).
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clip each chunk at the next 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* mark the final ePRD so the EDMA engine stops walking the table */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1270
5796d1c4 1271static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
31961943 1272{
559eedad 1273 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
31961943 1274 (last ? CRQB_CMD_LAST : 0);
559eedad 1275 *cmdw = cpu_to_le16(tmp);
31961943
BR
1276}
1277
05b308e1
BR
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ protocols go through the CRQB path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1367
/**
 *      mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.  The Gen-IIE CRQB carries the full
 *      taskfile, so no register picking is needed here.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* only DMA and NCQ protocols go through the CRQB path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1436
05b308e1
BR
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	/* (re)start EDMA in the mode this command's protocol requires */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1478
05b308e1
BR
1479/**
1480 * mv_err_intr - Handle error interrupts on the port
1481 * @ap: ATA channel to manipulate
9b358e30 1482 * @reset_allowed: bool: 0 == don't trigger from reset here
05b308e1
BR
1483 *
1484 * In most cases, just clear the interrupt and move on. However,
1485 * some cases require an eDMA reset, which is done right before
1486 * the COMRESET in mv_phy_reset(). The SERR case requires a
1487 * clear of pending errors in the SATA SERROR register. Finally,
1488 * if the port disabled DMA, update our cached copy to match.
1489 *
1490 * LOCKING:
1491 * Inherited from caller.
1492 */
bdd4ddde 1493static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
31961943
BR
1494{
1495 void __iomem *port_mmio = mv_ap_base(ap);
bdd4ddde
JG
1496 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1497 struct mv_port_priv *pp = ap->private_data;
1498 struct mv_host_priv *hpriv = ap->host->private_data;
1499 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1500 unsigned int action = 0, err_mask = 0;
9af5c9c9 1501 struct ata_eh_info *ehi = &ap->link.eh_info;
20f733e7 1502
bdd4ddde 1503 ata_ehi_clear_desc(ehi);
20f733e7 1504
bdd4ddde
JG
1505 if (!edma_enabled) {
1506 /* just a guess: do we need to do this? should we
1507 * expand this, and do it in all cases?
1508 */
936fd732
TH
1509 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1510 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
20f733e7 1511 }
bdd4ddde
JG
1512
1513 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1514
1515 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1516
1517 /*
1518 * all generations share these EDMA error cause bits
1519 */
1520
1521 if (edma_err_cause & EDMA_ERR_DEV)
1522 err_mask |= AC_ERR_DEV;
1523 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
6c1153e0 1524 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
bdd4ddde
JG
1525 EDMA_ERR_INTRL_PAR)) {
1526 err_mask |= AC_ERR_ATA_BUS;
1527 action |= ATA_EH_HARDRESET;
b64bbc39 1528 ata_ehi_push_desc(ehi, "parity error");
bdd4ddde
JG
1529 }
1530 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1531 ata_ehi_hotplugged(ehi);
1532 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
b64bbc39 1533 "dev disconnect" : "dev connect");
3606a380 1534 action |= ATA_EH_HARDRESET;
bdd4ddde
JG
1535 }
1536
ee9ccdf7 1537 if (IS_GEN_I(hpriv)) {
bdd4ddde
JG
1538 eh_freeze_mask = EDMA_EH_FREEZE_5;
1539
1540 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
5ab063e3 1541 pp = ap->private_data;
bdd4ddde 1542 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 1543 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde
JG
1544 }
1545 } else {
1546 eh_freeze_mask = EDMA_EH_FREEZE;
1547
1548 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
5ab063e3 1549 pp = ap->private_data;
bdd4ddde 1550 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 1551 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde
JG
1552 }
1553
1554 if (edma_err_cause & EDMA_ERR_SERR) {
936fd732
TH
1555 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1556 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
bdd4ddde
JG
1557 err_mask = AC_ERR_ATA_BUS;
1558 action |= ATA_EH_HARDRESET;
1559 }
afb0edd9 1560 }
20f733e7
BR
1561
1562 /* Clear EDMA now that SERR cleanup done */
3606a380 1563 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
20f733e7 1564
bdd4ddde
JG
1565 if (!err_mask) {
1566 err_mask = AC_ERR_OTHER;
1567 action |= ATA_EH_HARDRESET;
1568 }
1569
1570 ehi->serror |= serr;
1571 ehi->action |= action;
1572
1573 if (qc)
1574 qc->err_mask |= err_mask;
1575 else
1576 ehi->err_mask |= err_mask;
1577
1578 if (edma_err_cause & eh_freeze_mask)
1579 ata_port_freeze(ap);
1580 else
1581 ata_port_abort(ap);
1582}
1583
/*
 * mv_intr_pio - service a device interrupt for a non-EDMA (PIO) command.
 * @ap: ATA channel that raised the interrupt
 *
 * Reads the shadow status register and completes the active qc unless
 * the drive is still busy or the command is being polled.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1605
/*
 * mv_intr_edma - drain the EDMA response queue and complete commands.
 * @ap: ATA channel that raised the interrupt
 *
 * Walks the CRPB ring from the software consumer index up to the
 * hardware producer index, completing each finished qc, then writes
 * the consumer pointer back to hardware once at the end.  Bails out
 * to mv_err_intr() on the first errored response in non-NCQ mode.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* single write-back of the consumer pointer for the whole batch */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1671
05b308e1
BR
1672/**
1673 * mv_host_intr - Handle all interrupts on the given host controller
cca3974e 1674 * @host: host specific structure
05b308e1
BR
1675 * @relevant: port error bits relevant to this host controller
1676 * @hc: which host controller we're to look at
1677 *
1678 * Read then write clear the HC interrupt status then walk each
1679 * port connected to the HC and see if it needs servicing. Port
1680 * success ints are reported in the HC interrupt status reg, the
1681 * port error ints are reported in the higher level main
1682 * interrupt status register and thus are passed in via the
1683 * 'relevant' argument.
1684 *
1685 * LOCKING:
1686 * Inherited from caller.
1687 */
cca3974e 1688static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
20f733e7 1689{
f351b2d6
SB
1690 struct mv_host_priv *hpriv = host->private_data;
1691 void __iomem *mmio = hpriv->base;
20f733e7 1692 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
20f733e7 1693 u32 hc_irq_cause;
f351b2d6 1694 int port, port0, last_port;
20f733e7 1695
35177265 1696 if (hc == 0)
20f733e7 1697 port0 = 0;
35177265 1698 else
20f733e7 1699 port0 = MV_PORTS_PER_HC;
20f733e7 1700
f351b2d6
SB
1701 if (HAS_PCI(host))
1702 last_port = port0 + MV_PORTS_PER_HC;
1703 else
1704 last_port = port0 + hpriv->n_ports;
20f733e7
BR
1705 /* we'll need the HC success int register in most cases */
1706 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
bdd4ddde
JG
1707 if (!hc_irq_cause)
1708 return;
1709
1710 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
20f733e7
BR
1711
1712 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
2dcb407e 1713 hc, relevant, hc_irq_cause);
20f733e7 1714
8f71efe2 1715 for (port = port0; port < last_port; port++) {
cca3974e 1716 struct ata_port *ap = host->ports[port];
8f71efe2 1717 struct mv_port_priv *pp;
bdd4ddde 1718 int have_err_bits, hard_port, shift;
55d8ca4f 1719
bdd4ddde 1720 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
a2c91a88
JG
1721 continue;
1722
8f71efe2
YL
1723 pp = ap->private_data;
1724
31961943 1725 shift = port << 1; /* (port * 2) */
20f733e7
BR
1726 if (port >= MV_PORTS_PER_HC) {
1727 shift++; /* skip bit 8 in the HC Main IRQ reg */
1728 }
bdd4ddde
JG
1729 have_err_bits = ((PORT0_ERR << shift) & relevant);
1730
1731 if (unlikely(have_err_bits)) {
1732 struct ata_queued_cmd *qc;
8b260248 1733
9af5c9c9 1734 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1735 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1736 continue;
1737
1738 mv_err_intr(ap, qc);
1739 continue;
1740 }
1741
1742 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1743
1744 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1745 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1746 mv_intr_edma(ap);
1747 } else {
1748 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1749 mv_intr_pio(ap);
20f733e7
BR
1750 }
1751 }
1752 VPRINTK("EXIT\n");
1753}
1754
bdd4ddde
JG
/*
 * mv_pci_error - handle a chip-level PCI error interrupt.
 * @host: host structure
 * @mmio: chip base address
 *
 * Logs and clears the PCI error cause, dumps all registers for
 * debugging, then marks every online link for hard reset and
 * freezes its port.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* write-to-clear the error cause */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* record the cause only on the first affected port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1794
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each host controller with pending bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1849
c9d39130
JG
1850static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1851{
1852 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1853 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1854
1855 return hc_mmio + ofs;
1856}
1857
1858static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1859{
1860 unsigned int ofs;
1861
1862 switch (sc_reg_in) {
1863 case SCR_STATUS:
1864 case SCR_ERROR:
1865 case SCR_CONTROL:
1866 ofs = sc_reg_in * sizeof(u32);
1867 break;
1868 default:
1869 ofs = 0xffffffffU;
1870 break;
1871 }
1872 return ofs;
1873}
1874
da3dbb17 1875static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1876{
f351b2d6
SB
1877 struct mv_host_priv *hpriv = ap->host->private_data;
1878 void __iomem *mmio = hpriv->base;
0d5ff566 1879 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1880 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1881
da3dbb17
TH
1882 if (ofs != 0xffffffffU) {
1883 *val = readl(addr + ofs);
1884 return 0;
1885 } else
1886 return -EINVAL;
c9d39130
JG
1887}
1888
da3dbb17 1889static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1890{
f351b2d6
SB
1891 struct mv_host_priv *hpriv = ap->host->private_data;
1892 void __iomem *mmio = hpriv->base;
0d5ff566 1893 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
c9d39130
JG
1894 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1895
da3dbb17 1896 if (ofs != 0xffffffffU) {
0d5ff566 1897 writelfl(val, addr + ofs);
da3dbb17
TH
1898 return 0;
1899 } else
1900 return -EINVAL;
c9d39130
JG
1901}
1902
/* Reset the PCI bus for 50xx chips.  Early 5080 silicon (device 0x5080,
 * rev 0) must not have its expansion-ROM BAR control register touched.
 */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
1918
/* Program the 50xx flash controller timing/control register. */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1923
47c2b677 1924static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
1925 void __iomem *mmio)
1926{
c9d39130
JG
1927 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1928 u32 tmp;
1929
1930 tmp = readl(phy_mmio + MV5_PHY_MODE);
1931
1932 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1933 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
1934}
1935
/* Enable LED handling on 50xx chips. */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0, which
	 * looks odd alongside mv5_reset_bus()'s "|= (1 << 0)" on the same
	 * register; "&= ~(1 << 0)" may have been intended.  Confirm against
	 * chip documentation before changing -- this pokes hardware.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1948
/* Apply 50xx PHY errata workarounds for @port and restore the saved
 * pre-emphasis/amplitude values captured by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		/* 50XXB0 errata: set LT-mode bit 19, force PHY ctl mode 1 */
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* re-apply the pre/amps bits saved at probe time */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1974
c9d39130
JG
1975
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Disable EDMA, reset the channel, then zero a 50xx port's EDMA queue and
 * interrupt registers and restore default config/timeout values.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
2002
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Zero a 50xx host controller's interrupt/coalescing registers and apply
 * vendor-driver register 0x20 magic.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* magic values taken from the Marvell vendor driver */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
2021
2022static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2023 unsigned int n_hc)
2024{
2025 unsigned int hc, port;
2026
2027 for (hc = 0; hc < n_hc; hc++) {
2028 for (port = 0; port < MV_PORTS_PER_HC; port++)
2029 mv5_reset_hc_port(hpriv, mmio,
2030 (hc * MV_PORTS_PER_HC) + port);
2031
2032 mv5_reset_one_hc(hpriv, mmio, hc);
2033 }
2034
2035 return 0;
47c2b677
JG
2036}
2037
101ffae2
JG
2038#undef ZERO
2039#define ZERO(reg) writel(0, mmio + (reg))
7bb3c529 2040static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
101ffae2 2041{
02a121da 2042 struct mv_host_priv *hpriv = host->private_data;
101ffae2
JG
2043 u32 tmp;
2044
2045 tmp = readl(mmio + MV_PCI_MODE);
2046 tmp &= 0xff00ffff;
2047 writel(tmp, mmio + MV_PCI_MODE);
2048
2049 ZERO(MV_PCI_DISC_TIMER);
2050 ZERO(MV_PCI_MSI_TRIGGER);
2051 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2052 ZERO(HC_MAIN_IRQ_MASK_OFS);
2053 ZERO(MV_PCI_SERR_MASK);
02a121da
ML
2054 ZERO(hpriv->irq_cause_ofs);
2055 ZERO(hpriv->irq_mask_ofs);
101ffae2
JG
2056 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2057 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2058 ZERO(MV_PCI_ERR_ATTRIBUTE);
2059 ZERO(MV_PCI_ERR_COMMAND);
2060}
2061#undef ZERO
2062
2063static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2064{
2065 u32 tmp;
2066
2067 mv5_reset_flash(hpriv, mmio);
2068
2069 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2070 tmp &= 0x3;
2071 tmp |= (1 << 5) | (1 << 6);
2072 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2073}
2074
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private data (unused here)
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused here)
 *
 *      This routine only applies to 6xxx parts.  Returns 0 on success,
 *      1 if any step of the reset sequence fails.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* poll up to ~1ms for the PCI master to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2138
47c2b677 2139static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
2140 void __iomem *mmio)
2141{
2142 void __iomem *port_mmio;
2143 u32 tmp;
2144
ba3fe8fb
JG
2145 tmp = readl(mmio + MV_RESET_CFG);
2146 if ((tmp & (1 << 0)) == 0) {
47c2b677 2147 hpriv->signal[idx].amps = 0x7 << 8;
ba3fe8fb
JG
2148 hpriv->signal[idx].pre = 0x1 << 5;
2149 return;
2150 }
2151
2152 port_mmio = mv_port_base(mmio, idx);
2153 tmp = readl(port_mmio + PHY_MODE2);
2154
2155 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2156 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2157}
2158
/* Enable LED handling on 60xx chips via the GPIO port control register. */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2163
/* Apply 60xx PHY errata workarounds for @port (PHY_MODE2/3/4 fixups),
 * then restore the saved pre-emphasis/amplitude values.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2: writing PHY_MODE4 clobbers reg 0x310; save it */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		/* ... and restore 0x310 afterwards */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2229
f351b2d6
SB
2230/* TODO: use the generic LED interface to configure the SATA Presence */
2231/* & Acitivy LEDs on the board */
2232static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2233 void __iomem *mmio)
2234{
2235 return;
2236}
2237
2238static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2239 void __iomem *mmio)
2240{
2241 void __iomem *port_mmio;
2242 u32 tmp;
2243
2244 port_mmio = mv_port_base(mmio, idx);
2245 tmp = readl(port_mmio + PHY_MODE2);
2246
2247 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2248 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2249}
2250
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Disable EDMA, reset the channel, then zero an SoC port's EDMA queue and
 * interrupt registers (SoC uses EDMA cfg 0x101f vs 0x11f on 50xx).
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
2278
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Zero the SoC host controller's interrupt-related registers.
 * SoC variants have a single HC, hence the fixed index 0.
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}

#undef ZERO
2292
2293static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2294 void __iomem *mmio, unsigned int n_hc)
2295{
2296 unsigned int port;
2297
2298 for (port = 0; port < hpriv->n_ports; port++)
2299 mv_soc_reset_hc_port(hpriv, mmio, port);
2300
2301 mv_soc_reset_one_hc(hpriv, mmio);
2302
2303 return 0;
2304}
2305
2306static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2307 void __iomem *mmio)
2308{
2309 return;
2310}
2311
2312static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2313{
2314 return;
2315}
2316
/* Assert ATA_RST on the port's EDMA engine, then clear it and run the
 * chip-family PHY errata hook.  Gen II parts additionally need the SATA
 * interface control register forced to gen2i speed while reset is held.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2343
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @class: out: detected device class
 *      @deadline: jiffies value after which polling gives up
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      NOTE(review): the body now calls msleep(), which sleeps -- the
 *      "does not sleep" claim below is stale; this must only be called
 *      from process context (EH).
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* wait for DET to settle (device present or absent) */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2447
/* EH prereset: stop DMA and decide whether a hardreset is required.
 * A hardreset is forced on the first EH pass for this port, or if DMA
 * could not be stopped; otherwise wait for the device to become ready.
 */
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* first reset on this port must be a hardreset */
	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}
2475
cc0680a5 2476static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2477 unsigned long deadline)
31961943 2478{
cc0680a5 2479 struct ata_port *ap = link->ap;
bdd4ddde 2480 struct mv_host_priv *hpriv = ap->host->private_data;
f351b2d6 2481 void __iomem *mmio = hpriv->base;
31961943 2482
bdd4ddde 2483 mv_stop_dma(ap);
31961943 2484
bdd4ddde 2485 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2486
bdd4ddde
JG
2487 mv_phy_reset(ap, class, deadline);
2488
2489 return 0;
2490}
2491
/* EH postreset: report link status, clear accumulated SError bits, and
 * re-arm the device control register if a device was detected.
 */
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
2513
2514static void mv_error_handler(struct ata_port *ap)
2515{
2516 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2517 mv_hardreset, mv_postreset);
2518}
2519
/* Freeze the port: mask its err/done bits in the main IRQ mask register. */
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	/* each port owns two adjacent bits; second-HC ports shift one more */
	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
2539
/* Thaw the port: clear stale EDMA error and HC interrupt causes, then
 * re-enable its err/done bits in the main IRQ mask register.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* each port owns two adjacent bits; second-HC ports shift one more */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port index within its own HC */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2573
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: taskfile shadow registers are u32-spaced
	 * within the port's shadow block
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2622
/* Identify the chip family from @board_idx plus PCI revision, select the
 * per-family ops vector, record errata flags, and pick conventional-PCI
 * vs PCIe IRQ cause/mask register offsets.  Returns 0 on success, 1 on
 * an invalid board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fallthrough: 7042 shares the 6042 (Gen IIE) setup below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts keep their IRQ cause/mask registers elsewhere */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2762
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* cache the main IRQ cause/mask addresses: PCI and SoC hosts
	 * place them at different offsets
	 */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* capture reset-time PHY settings before resetting anything */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
fb621e2f 2879
fbf14e2f
BB
2880static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2881{
2882 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2883 MV_CRQB_Q_SZ, 0);
2884 if (!hpriv->crqb_pool)
2885 return -ENOMEM;
2886
2887 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2888 MV_CRPB_Q_SZ, 0);
2889 if (!hpriv->crpb_pool)
2890 return -ENOMEM;
2891
2892 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2893 MV_SG_TBL_SZ, 0);
2894 if (!hpriv->sg_tbl_pool)
2895 return -ENOMEM;
2896
2897 return 0;
2898}
2899
f351b2d6
SB
2900/**
2901 * mv_platform_probe - handle a positive probe of an soc Marvell
2902 * host
2903 * @pdev: platform device found
2904 *
2905 * LOCKING:
2906 * Inherited from caller.
2907 */
2908static int mv_platform_probe(struct platform_device *pdev)
2909{
2910 static int printed_version;
2911 const struct mv_sata_platform_data *mv_platform_data;
2912 const struct ata_port_info *ppi[] =
2913 { &mv_port_info[chip_soc], NULL };
2914 struct ata_host *host;
2915 struct mv_host_priv *hpriv;
2916 struct resource *res;
2917 int n_ports, rc;
20f733e7 2918
f351b2d6
SB
2919 if (!printed_version++)
2920 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
bca1c4eb 2921
f351b2d6
SB
2922 /*
2923 * Simple resource validation ..
2924 */
2925 if (unlikely(pdev->num_resources != 2)) {
2926 dev_err(&pdev->dev, "invalid number of resources\n");
2927 return -EINVAL;
2928 }
2929
2930 /*
2931 * Get the register base first
2932 */
2933 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2934 if (res == NULL)
2935 return -EINVAL;
2936
2937 /* allocate host */
2938 mv_platform_data = pdev->dev.platform_data;
2939 n_ports = mv_platform_data->n_ports;
2940
2941 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2942 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2943
2944 if (!host || !hpriv)
2945 return -ENOMEM;
2946 host->private_data = hpriv;
2947 hpriv->n_ports = n_ports;
2948
2949 host->iomap = NULL;
f1cb0ea1
SB
2950 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2951 res->end - res->start + 1);
f351b2d6
SB
2952 hpriv->base -= MV_SATAHC0_REG_BASE;
2953
fbf14e2f
BB
2954 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2955 if (rc)
2956 return rc;
2957
f351b2d6
SB
2958 /* initialize adapter */
2959 rc = mv_init_host(host, chip_soc);
2960 if (rc)
2961 return rc;
2962
2963 dev_printk(KERN_INFO, &pdev->dev,
2964 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2965 host->n_ports);
2966
2967 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2968 IRQF_SHARED, &mv6_sht);
2969}
2970
2971/*
2972 *
2973 * mv_platform_remove - unplug a platform interface
2974 * @pdev: platform device
2975 *
2976 * A platform bus SATA device has been unplugged. Perform the needed
2977 * cleanup. Also called on module unload for any active devices.
2978 */
2979static int __devexit mv_platform_remove(struct platform_device *pdev)
2980{
2981 struct device *dev = &pdev->dev;
2982 struct ata_host *host = dev_get_drvdata(dev);
f351b2d6
SB
2983
2984 ata_host_detach(host);
f351b2d6 2985 return 0;
20f733e7
BR
2986}
2987
f351b2d6
SB
/* platform (SoC) bus binding; remove is discarded when hotplug is off */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2996
2997
7bb3c529 2998#ifdef CONFIG_PCI
f351b2d6
SB
/* forward declaration: the probe body lives below, after the PCI-only
 * helpers it depends on
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


/* PCI bus binding for the Marvell chips listed in mv_pci_tbl */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
3014
3015
3016/* move to PCI layer or libata core? */
3017static int pci_go_64(struct pci_dev *pdev)
3018{
3019 int rc;
3020
3021 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3022 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3023 if (rc) {
3024 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3025 if (rc) {
3026 dev_printk(KERN_ERR, &pdev->dev,
3027 "64-bit DMA enable failed\n");
3028 return rc;
3029 }
3030 }
3031 } else {
3032 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3033 if (rc) {
3034 dev_printk(KERN_ERR, &pdev->dev,
3035 "32-bit DMA enable failed\n");
3036 return rc;
3037 }
3038 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3039 if (rc) {
3040 dev_printk(KERN_ERR, &pdev->dev,
3041 "32-bit consistent DMA enable failed\n");
3042 return rc;
3043 }
3044 }
3045
3046 return rc;
3047}
3048
05b308e1
BR
3049/**
3050 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 3051 * @host: ATA host to print info about
05b308e1
BR
3052 *
3053 * FIXME: complete this.
3054 *
3055 * LOCKING:
3056 * Inherited from caller.
3057 */
4447d351 3058static void mv_print_info(struct ata_host *host)
31961943 3059{
4447d351
TH
3060 struct pci_dev *pdev = to_pci_dev(host->dev);
3061 struct mv_host_priv *hpriv = host->private_data;
44c10138 3062 u8 scc;
c1e4fe71 3063 const char *scc_s, *gen;
31961943
BR
3064
3065 /* Use this to determine the HW stepping of the chip so we know
3066 * what errata to workaround
3067 */
31961943
BR
3068 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3069 if (scc == 0)
3070 scc_s = "SCSI";
3071 else if (scc == 0x01)
3072 scc_s = "RAID";
3073 else
c1e4fe71
JG
3074 scc_s = "?";
3075
3076 if (IS_GEN_I(hpriv))
3077 gen = "I";
3078 else if (IS_GEN_II(hpriv))
3079 gen = "II";
3080 else if (IS_GEN_IIE(hpriv))
3081 gen = "IIE";
3082 else
3083 gen = "?";
31961943 3084
a9524a76 3085 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
3086 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3087 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
31961943
BR
3088 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3089}
3090
/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ATA host, acquires PCI resources (all via devm/pcim
 *      managed APIs, so error paths simply return), configures DMA masks,
 *      creates the DMA pools, initializes the chip, and activates the host.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* driver_data selects the mv_port_info entry for this chip family */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* BAR busy: keep the device pinned so it is not disabled
		 * out from under whoever currently owns the region
		 */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	/* 64-bit DMA if possible, with 32-bit fallback */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		/* MSI requested but unavailable: fall back to legacy INTx */
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best-effort; failure is harmless */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
7bb3c529 3160#endif
20f733e7 3161
f351b2d6
SB
/* redundant forward declarations (definitions appear above); harmless */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
3164
20f733e7
BR
/*
 * Register the PCI driver (when configured) and then the platform
 * driver.  If the platform registration fails, the PCI driver is
 * unregistered again so the module load fails cleanly.
 */
static int __init mv_init(void)
{
	int rc = -ENODEV;	/* returned as-is if no bus type is enabled */
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* roll back the PCI registration on platform failure */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
3181
/* unregister both bus drivers, mirroring mv_init() in reverse order */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3189
/* module metadata; the device table enables automatic PCI module loading */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sata_mv");
20f733e7 3196
#ifdef CONFIG_PCI
/* "msi" only applies to PCI-attached controllers; read-only in sysfs */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif
ddef9bb3 3201
20f733e7
BR
/* module entry/exit points */
module_init(mv_init);
module_exit(mv_exit);
This page took 0.509012 seconds and 5 git commands to generate.