sata_mv: simplify freeze/thaw bit-shift calculations
drivers/ata/sata_mv.c
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Especially with PCI Message Signalled Interrupts (MSI), the
  overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC	= (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FIS_CFG_OFS		= 0x360,
	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX |
				  /* temporary, until we fix hotplug: */
				  (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= sata_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

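/*
 * Editor's note (not in the original file): the dummy readl() back from
 * the same address forces any posted PCI write buffers to drain before
 * the caller proceeds -- the standard "write with flush" idiom.
 */
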
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with the main_cause and main_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}

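/*
 * Editor's note -- a worked example plus a hedged usage sketch, not part
 * of the original file.  For port 5: mv_hc_from_port(5) = 5 >> 2 = 1, so
 * shift starts at 1 * HC_SHIFT = 9; hardport = 5 & 3 = 1, giving
 * shift = 9 + 1 * 2 = 11, so (DONE_IRQ | ERR_IRQ) << shift selects bits
 * 11 and 12 of the main cause/mask registers.  The freeze hook declared
 * above could use the macro roughly as follows (sketch only; the real
 * handler lives beyond this excerpt):
 */
#if 0	/* illustrative sketch, assuming only the fields declared above */
static void example_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	u32 main_mask;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
	/* stop delivery of this port's done/error interrupts */
	main_mask  = readl(hpriv->main_mask_reg_addr);
	main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}
#endif
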
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

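/*
 * Editor's note -- worked example (not in the original file): for port 5,
 * hc = 1, so mv_hc_base() returns base + 0x20000 + 0x10000 = base + 0x30000;
 * adding the 0x2000 arbiter window plus hardport 1 * 0x2000 places the
 * port registers at base + 0x34000.
 */
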
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

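/*
 * Editor's note (not in the original file): shifting the queue index by
 * EDMA_REQ_Q_PTR_SHIFT (5) or EDMA_RSP_Q_PTR_SHIFT (3) turns it into a
 * byte offset, since CRQBs are 32 bytes and CRPBs 8 bytes apiece.  The
 * 1KB/256B base alignment (checked by the WARN_ONs above) is what lets
 * the base address and the index share a single register.
 */
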
/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel to manipulate
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command being issued
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hardport = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hardport);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

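/*
 * Editor's note (not in the original file): the polling loop above allows
 * up to 10000 iterations of udelay(10), i.e. a worst-case busy-wait of
 * roughly 100ms before declaring the engine stuck and returning -EIO.
 */
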
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

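/*
 * Editor's note (not in the original file): with libata's SCR numbering
 * (SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2), this maps SStatus to
 * 0x300, SError to 0x304 and SControl to 0x308, while SCR_ACTIVE is
 * special-cased to the hardware's 0x350 offset.
 */
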
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
			ata_dev_printk(adev, KERN_INFO,
				"max_sectors limited to %u for NCQ\n",
				adev->max_sectors);
		}
	}
}

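/*
 * Editor's note (not in the original file): the 256-sector cap follows
 * from the missing hob_nsect noted above -- without the high byte of the
 * sector count, an NCQ command can address at most 256 sectors per I/O
 * on Gen-II hardware.
 */
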
static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
{
	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
	/*
	 * Various bit settings required for operation
	 * in FIS-based switching (fbs) mode on GenIIe:
	 */
	old_fcfg   = readl(port_mmio + FIS_CFG_OFS);
	old_ltmode = readl(port_mmio + LTMODE_OFS);
	if (enable_fbs) {
		new_fcfg   = old_fcfg   |  FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode |  LTMODE_BIT8;
	} else { /* disable fbs */
		new_fcfg   = old_fcfg   & ~FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode & ~LTMODE_BIT8;
	}
	if (new_fcfg != old_fcfg)
		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

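/*
 * Editor's note -- worked example (not in the original file): a segment at
 * DMA address 0xfff0 with length 0x20 straddles a 64K boundary, so the
 * loop above emits two ePRDs: 0xfff0/0x10 and 0x10000/0x10.  This
 * potential doubling is why the scsi_host_templates advertise only
 * MV_MAX_SG_CT / 2 entries.
 */
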
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

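/*
 * Editor's note -- worked example (not in the original file), assuming the
 * usual libata taskfile numbering where ATA_REG_CMD is 7: packing command
 * byte 0x25 (READ DMA EXT) as the last word yields
 * 0x25 | (7 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST = 0x9725.
 */
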
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

05b308e1
BR
1485/**
1486 * mv_err_intr - Handle error interrupts on the port
1487 * @ap: ATA channel to manipulate
9b358e30 1488 * @reset_allowed: bool: 0 == don't trigger from reset here
05b308e1
BR
1489 *
1490 * In most cases, just clear the interrupt and move on. However,
e12bef50
ML
1491 * some cases require an eDMA reset, which also performs a COMRESET.
1492 * The SERR case requires a clear of pending errors in the SATA
1493 * SERROR register. Finally, if the port disabled DMA,
1494 * update our cached copy to match.
05b308e1
BR
1495 *
1496 * LOCKING:
1497 * Inherited from caller.
1498 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);

	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
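
/*
 * Editor's sketch (not part of the original driver): the response
 * queue is a power-of-two ring, so hardware and software pointers are
 * compared modulo the queue depth.  This hypothetical helper mirrors
 * the read at the top of mv_intr_edma(), assuming the
 * EDMA_RSP_Q_PTR_SHIFT and MV_MAX_Q_DEPTH_MASK definitions earlier in
 * this file.
 */
static inline u32 mv_resp_q_in_index(void __iomem *port_mmio)
{
	/* the pointer field lives above EDMA_RSP_Q_PTR_SHIFT in the reg */
	return (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
}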

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hardport, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((ERR_IRQ << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hardport = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((DMA_IRQ << hardport) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hardport) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
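
/*
 * Editor's sketch (not part of the original driver): each port owns a
 * DONE/ERR bit pair in the main IRQ cause/mask registers, two bits per
 * port, with bit 8 skipped between the two host controllers.  This
 * hypothetical helper shows the same calculation that mv_host_intr()
 * above and the freeze/thaw paths below perform, assuming
 * MV_PORTS_PER_HC == 4: port 0 -> shift 0, port 3 -> shift 6,
 * port 4 -> shift 9, port 7 -> shift 15.
 */
static inline unsigned int mv_irq_shift(unsigned int port)
{
	unsigned int shift = port << 1;		/* two bits per port */

	if (port >= MV_PORTS_PER_HC)
		shift++;	/* skip bit 8 in the HC Main IRQ reg */
	return shift;
}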

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 main_cause, main_mask;

	spin_lock(&host->lock);
	main_cause = readl(hpriv->main_cause_reg_addr);
	main_mask  = readl(hpriv->main_mask_reg_addr);
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (!(main_cause & main_mask) || (main_cause == 0xffffffffU))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = main_cause & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
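
/*
 * Editor's note (illustrative, not from the original source): a PCI
 * read returning all-ones usually means the device has fallen off the
 * bus, so the gating test in mv_interrupt() can be read as this
 * hypothetical predicate:
 */
static inline bool mv_irq_is_spurious(u32 main_cause, u32 main_mask)
{
	/* nothing we unmasked is pending, or the read itself was bogus */
	return !(main_cause & main_mask) || main_cause == 0xffffffffU;
}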

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
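
/*
 * Editor's note (illustrative): with the standard libata numbering
 * (SCR_STATUS == 0, SCR_ERROR == 1, SCR_CONTROL == 2), the 5xxx SCR
 * registers sit at consecutive 32-bit offsets, so for example
 * mv5_scr_offset(SCR_ERROR) == 0x4.  Anything else yields the
 * 0xffffffffU sentinel that the accessors below turn into -EINVAL.
 */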

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
	/*
	 * Temporary: wait 3 seconds before port-probing can happen,
	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
	 * This can go away once hotplug is fully/correctly implemented.
	 */
	if (rc == 0)
		msleep(3000);
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);

	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifctl |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}
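
/*
 * Editor's note (illustrative): mv_setup_ifctl() is used both ways
 * later in this file:
 *
 *	mv_setup_ifctl(port_mmio, 1);	-- allow gen2i (3.0 Gb/s) speed
 *	mv_setup_ifctl(port_mmio, 0);	-- force 1.5 Gb/s, used by the
 *					   FEr SATA#10 fallback in
 *					   mv_hardreset()
 */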

/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
			sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ;	/* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
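
/*
 * Editor's note (illustrative): in the retry loop above, SStatus 0x121
 * decodes as DET=1 (device detected but no phy communication), the
 * symptom this errata workaround targets; 0x113 and 0x123 decode as
 * DET=3 with SPD=1 or SPD=2, i.e. an established 1.5 or 3.0 Gb/s link,
 * at which point the loop terminates.
 */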

static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	u32 main_mask;

	/* FIXME: handle coalescing completion events properly */

	mv_stop_edma(ap);
	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* disable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 main_mask, hc_irq_cause;

	/* FIXME: handle coalescing completion events properly */

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}
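
/*
 * Editor's note (illustrative): MV_PORT_TO_SHIFT_AND_HARDPORT (defined
 * with the other constants earlier in this file) produces the same
 * shift as mv_host_intr(), e.g. port 5 -> hardport 1, shift 11, so
 * freeze/thaw toggle the DONE_IRQ/ERR_IRQ pair at bits 11-12 of the
 * main mask register.
 */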

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr  = mmio + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr  = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
	}

	/* global interrupt mask: 0 == mask everything */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
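
/*
 * Editor's note (illustrative): each enabled window control word packs
 * size, attribute, target and an enable bit.  A hypothetical 256 MB
 * chip-select window would be programmed as
 *
 *	ctrl = ((0x10000000 - 1) & 0xffff0000)	-- size field, 0x0fff0000
 *	     | (cs->mbus_attr << 8)
 *	     | (dram->mbus_dram_target_id << 4)
 *	     | 1;				-- window enable
 *
 * with cs->base written to the matching WINDOW_BASE register.
 */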

/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell
 * host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation.
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/**
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged.  Perform the needed
 * cleanup.  Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
	},
};

#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to work around
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);