/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *     mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *     creating LibATA target mode support would be very interesting.
 *
 *     Target mode, for those without docs, is the ability to directly
 *     connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher.  The chips may not
 * work correctly otherwise (note: this is a pretty rare condition).
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.28"

/*
 * module options
 */

#ifdef CONFIG_PCI
static int msi;
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
		 "IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
		 "IRQ coalescing time threshold in usecs");

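/*
 * Illustrative usage (not part of the driver): loading the module with
 * IRQ coalescing enabled, deferring the completion interrupt until
 * either 4 I/Os have completed or 100 usecs have elapsed:
 *
 *	modprobe sata_mv irq_coalescing_io_count=4 irq_coalescing_usecs=100
 *
 * Leaving either parameter at 0 (the default) disables coalescing;
 * see mv_set_irq_coalescing() below.
 */
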
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC	= 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD	= ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT	= 255,		/* completed I/O count */

	MV_PCI_REG_BASE		= 0,

	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	COAL_REG_BASE		= 0x18000,
	IRQ_COAL_CAUSE		= (COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ	= (1 << 4),	/* all ports irq event */

	IRQ_COAL_IO_THRESHOLD	= (COAL_REG_BASE + 0xcc),
	IRQ_COAL_TIME_THRESHOLD	= (COAL_REG_BASE + 0xd0),

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	TRAN_COAL_CAUSE_LO	= (COAL_REG_BASE + 0x88),
	TRAN_COAL_CAUSE_HI	= (COAL_REG_BASE + 0x8c),

	SATAHC0_REG_BASE	= 0x20000,
	FLASH_CTL		= 0x1046c,
	GPIO_PORT_CTL		= 0x104f0,
	RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NCQ |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	MV_PCI_COMMAND		= 0xc00,
	MV_PCI_COMMAND_MWRCOM	= (1 << 4),	/* PCI Master Write Combining */
	MV_PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE		= 0x1d58,
	PCI_IRQ_MASK		= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE		= 0x1900,
	PCIE_IRQ_MASK		= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE	= 0x1d60,
	PCI_HC_MAIN_IRQ_MASK	= 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE	= 0x20020,
	SOC_HC_MAIN_IRQ_MASK	= 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by (2 * port #) */
	DONE_IRQ		= (1 << 1),	/* shift by (2 * port #) */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3		= 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7		= (DONE_IRQ_0_3 << HC_SHIFT),  /* 4,5,6,7 */
	PCI_ERR			= (1 << 18),
	TRAN_COAL_LO_DONE	= (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE	= (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE	= (1 << 17),	/* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE	= (1 << 21),	/* GEN_II(E) IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */

	/* SATAHC registers */
	HC_CFG			= 0x00,

	HC_IRQ_CAUSE		= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD	= 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD	= 0x0010,

	SOC_LED_CTRL		= 0x2c,
	SOC_LED_CTRL_BLINK	= (1 << 0),	/* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
						/*  with dev activity LED */

	/* Shadow block registers */
	SHD_BLK			= 0x100,
	SHD_CTL_AST		= 0x20,		/* ofs from SHD_BLK */

	/* SATA registers */
	SATA_STATUS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE		= 0x350,
	FIS_IRQ_CAUSE		= 0x364,
	FIS_IRQ_CAUSE_AN	= (1 << 9),	/* async notification */

	LTMODE			= 0x30c,	/* requires read-after-write */
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE2		= 0x330,
	PHY_MODE3		= 0x310,

	PHY_MODE4		= 0x314,	/* requires read-after-write */
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	SATA_IFCTL		= 0x344,
	SATA_TESTCTL		= 0x348,
	SATA_IFSTAT		= 0x34c,
	VENDOR_UNIQUE_FIS	= 0x35c,

	FISCFG			= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	PHY_MODE9_GEN2		= 0x398,
	PHY_MODE9_GEN1		= 0x39c,
	PHYCFG_OFS		= 0x3a0,	/* only in 65n devices */

	MV5_PHY_MODE		= 0x74,
	MV5_LTMODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_IFCFG		= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE	= 0x8,
	EDMA_ERR_IRQ_MASK	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI	= 0x10,
	EDMA_REQ_Q_IN_PTR	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI	= 0x1c,
	EDMA_RSP_Q_IN_PTR	= 0x20,
	EDMA_RSP_Q_OUT_PTR	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	EDMA_HALTCOND		= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD		= 0x224,	/* bmdma command register */
	BMDMA_STATUS		= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW		= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH		= 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is led blinking enabled? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	unsigned int		board_idx;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	u32			irq_cause_offset;
	u32			irq_mask_offset;
	u32			unmask_all_irqs;

	/*
	 * Needed on some devices that require their clocks to be enabled.
	 * These are optional: if the platform device does not have any
	 * clocks, they won't be used.  Also, if the underlying hardware
	 * does not support the common clock framework (CONFIG_HAVE_CLK=n),
	 * all the clock operations become no-ops (see clk.h).
	 */
	struct clk		*clk;
	struct clk		**port_clks;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
					struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
#ifdef CONFIG_PCI
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
#endif
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &ata_bmdma_port_ops,

	.lost_interrupt		= ATA_OP_NULL,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.dev_config		= mv6_dev_config,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.softreset		= mv_softreset,
	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= ATA_PIO4,
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata		= mv_soc_65n_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}

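/*
 * Worked example (illustrative): for port 5, mv_hc_from_port(5) = 1,
 * so shift starts at 1 * HC_SHIFT = 9; hardport = 5 & 3 = 1, giving
 * shift = 9 + 1 * 2 = 11.  ERR_IRQ << 11 and DONE_IRQ << 11 then select
 * port 5's bits in the main_irq_cause / main_irq_mask registers.
 */
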
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 *      mv_save_cached_regs - (re-)initialize cached port registers
 *      @ap: the port whose registers we are caching
 *
 *	Initialize the local cache of port registers,
 *	so that reading them over and over again can
 *	be avoided on the hotter paths of this driver.
 *	This saves a few microseconds each time we switch
 *	to/from EDMA mode to perform (eg.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

/**
 *      mv_write_cached_reg - write to a cached port register
 *      @addr: hardware address of the register
 *      @old: pointer to cached value of the register
 *      @new: new value for the register
 *
 *	Write a new value to a cached register,
 *	but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		unsigned long laddr;
		*old = new;
		/*
		 * Workaround for 88SX60x1-B2 FEr SATA#13:
		 * Read-after-write is needed to prevent generating 64-bit
		 * write cycles on the PCI bus for SATA interface registers
		 * at offsets ending in 0x4 or 0xc.
		 *
		 * Looks like a lot of fuss, but it avoids an unnecessary
		 * +1 usec read-after-write delay for unaffected registers.
		 */
		laddr = (long)addr & 0xffff;
		if (laddr >= 0x300 && laddr <= 0x33c) {
			laddr &= 0x000f;
			if (laddr == 0x4 || laddr == 0xc) {
				writelfl(new, addr); /* read after write */
				return;
			}
		}
		writel(new, addr);	/* unaffected by the errata */
	}
}

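/*
 * Illustrative example (not used by the code): of the cached registers,
 * LTMODE lives at port offset 0x30c, inside 0x300..0x33c and ending in
 * 0xc, so it takes the flushed writelfl() path; FISCFG at 0x360 falls
 * outside that window and takes the plain writel() fast path.
 */
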
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
}

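/*
 * Illustrative note: each IN/OUT pointer register packs the ring index
 * into the low bits of the same word as the ring's low base address.
 * With req_idx = 3, for example, index = 3 << EDMA_REQ_Q_PTR_SHIFT = 0x60;
 * the largest possible index (31 << 5 = 0x3e0) stays below the 1KB
 * alignment of crqb_dma enforced by the WARN_ON above.
 */
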
static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				     unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);

	mv_enable_port_irqs(ap, port_irqs);
}

static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
							ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		/*
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		 */
		writel(clks,  mmio + IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0; /* force clearing of regular regs below */
	}

	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks,  hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}

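/*
 * Illustrative arithmetic for the conversion above (not used by the code):
 * with usecs = 100, clks = 100 * COAL_CLOCKS_PER_USEC = 15000 internal
 * clocks, comfortably below the MAX_COAL_TIME_THRESHOLD cap of
 * (1 << 24) - 1 clocks (roughly 112 msec at 150 clocks/usec).
 */
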
/**
 *      mv_start_edma - Enable eDMA engine
 *      @ap: ATA port
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command about to be issued
 *
 *      Enable the port's eDMA engine, first stopping and reconfiguring
 *      it if the current NCQ setting does not match what the new
 *      command requires.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_info(ap, "%s: %u+ usecs\n", __func__, i); */
}

/**
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

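/*
 * Illustrative note: the polling loop above allows up to
 * 10000 iterations * 10 usecs = 100 msec for EDMA_EN to clear
 * before giving up with -EIO.
 */
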
static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_err(ap, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
#endif
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

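/*
 * Illustrative mapping (follows the libata SCR numbering): SCR_STATUS (0)
 * resolves to port offset 0x300, SCR_ERROR (1) to 0x304, SCR_CONTROL (2)
 * to 0x308, while SCR_ACTIVE lives apart at 0x350.
 */
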
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
		if (sc_reg_in == SCR_CONTROL) {
			/*
			 * Workaround for 88SX60x1 FEr SATA#26:
			 *
			 * COMRESETs have to take care not to accidentally
			 * put the drive to sleep when writing SCR_CONTROL.
			 * Setting bits 12..15 prevents this problem.
			 *
			 * So if we see an outbound COMRESET, set those bits.
			 * Ditto for the followup write that clears the reset.
			 *
			 * The proprietary driver does this for
			 * all chip versions, and so do we.
			 */
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
				val |= 0xf000;
		}
		writelfl(val, addr);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_info(adev,
				"NCQ disabled for command-based switching\n");
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;

	/* PIO commands need exclusive link: no other commands [DMA or PIO]
	 * can run concurrently.
	 * set excl_link when we want to send a PIO command in DMA mode
	 * or a non-NCQ command in NCQ mode.
	 * When we receive a command from that link, and there are no
	 * outstanding commands, mark a flag to clear excl_link and let
	 * the command go through.
	 */
	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
			return 0;
		} else
			return ATA_DEFER_PORT;
	}

	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
		if (ata_is_ncq(qc->tf.protocol))
			return 0;
		else {
			ap->excl_link = link;
			return ATA_DEFER_PORT;
		}
	}

	return ATA_DEFER_PORT;
}

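/*
 * Illustrative summary of the deferral policy above:
 *
 *	delayed-EH flag set                     -> defer
 *	excl_link held by another link          -> defer
 *	excl_link held by this link, port busy  -> defer
 *	port completely idle                    -> issue
 *	EDMA active with NCQ, qc is NCQ         -> issue
 *	anything else                           -> defer
 */
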
static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;

	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode   = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |=  FISCFG_WAIT_DEV_ERR;
	} else {
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	}

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}

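/*
 * Illustrative summary of the register settings chosen above:
 *
 *	mode          FISCFG                        LTMODE_BIT8  EDMA_ERR_DEV halt
 *	no FBS        clear SINGLE_SYNC/WAIT_DEV    clear        set
 *	FBS + NCQ     set SINGLE_SYNC               set          clear
 *	FBS, no NCQ   set SINGLE_SYNC+WAIT_DEV_ERR  set          set
 */
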
static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + GPIO_PORT_CTL);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + GPIO_PORT_CTL);
}

/**
 *	mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
 *	@ap: Port being initialized
 *	@enable_bmdma: enable (1) or disable (0) basic DMA
 *
 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
 *
 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
 *	of basic DMA on the GEN_IIE versions of the chips.
 *
 *	This bit survives EDMA resets, and must be set for basic DMA
 *	to function, and should be cleared when EDMA is active.
 */
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;

	if (enable_bmdma)
		new = *old | 1;
	else
		new = *old & ~1;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}

000b344f
ML
1525/*
1526 * SOC chips have an issue whereby the HDD LEDs don't always blink
1527 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
1528 * of the SOC takes care of it, generating a steady blink rate when
1529 * any drive on the chip is active.
1530 *
1531 * Unfortunately, the blink mode is a global hardware setting for the SOC,
1532 * so we must use it whenever at least one port on the SOC has NCQ enabled.
1533 *
1534 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
1535 * LED operation works then, and provides better (more accurate) feedback.
1536 *
1537 * Note that this code assumes that an SOC never has more than one HC onboard.
1538 */
1539static void mv_soc_led_blink_enable(struct ata_port *ap)
1540{
1541 struct ata_host *host = ap->host;
1542 struct mv_host_priv *hpriv = host->private_data;
1543 void __iomem *hc_mmio;
1544 u32 led_ctrl;
1545
1546 if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
1547 return;
1548 hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
1549 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
cae5a29d
ML
1550 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1551 writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
000b344f
ML
1552}
1553
1554static void mv_soc_led_blink_disable(struct ata_port *ap)
1555{
1556 struct ata_host *host = ap->host;
1557 struct mv_host_priv *hpriv = host->private_data;
1558 void __iomem *hc_mmio;
1559 u32 led_ctrl;
1560 unsigned int port;
1561
1562 if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
1563 return;
1564
1565 /* disable led-blink only if no ports are using NCQ */
1566 for (port = 0; port < hpriv->n_ports; port++) {
1567 struct ata_port *this_ap = host->ports[port];
1568 struct mv_port_priv *pp = this_ap->private_data;
1569
1570 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1571 return;
1572 }
1573
1574 hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
1575 hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
cae5a29d
ML
1576 led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
1577 writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
000b344f
ML
1578}
1579
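/*
 * mv_edma_cfg - compute and program the EDMA_CFG register for a port
 *
 * Builds the per-generation configuration (burst sizes, FBS, NCQ,
 * host-queue cache and early-completion bits) and writes it out,
 * updating pp_flags to match the chosen mode.
 */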
00b81235 1580static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
e4e7b892 1581{
0c58912e 1582 u32 cfg;
e12bef50
ML
1583 struct mv_port_priv *pp = ap->private_data;
1584 struct mv_host_priv *hpriv = ap->host->private_data;
1585 void __iomem *port_mmio = mv_ap_base(ap);
e4e7b892
JG
1586
1587 /* set up non-NCQ EDMA configuration */
0c58912e 1588 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
d16ab3f6
ML
1589 pp->pp_flags &=
1590 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
e4e7b892 1591
0c58912e 1592 if (IS_GEN_I(hpriv))
e4e7b892
JG
1593 cfg |= (1 << 8); /* enab config burst size mask */
1594
dd2890f6 1595 else if (IS_GEN_II(hpriv)) {
e4e7b892 1596 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
dd2890f6 1597 mv_60x1_errata_sata25(ap, want_ncq);
e4e7b892 1598
dd2890f6 1599 } else if (IS_GEN_IIE(hpriv)) {
00f42eab
ML
1600 int want_fbs = sata_pmp_attached(ap);
1601 /*
1602 * Possible future enhancement:
1603 *
1604 * The chip can use FBS with non-NCQ, if we allow it,
1605 * but first we need to have the error handling in place
1606 * for this mode (datasheet section 7.3.15.4.2.3).
1607 * So disallow non-NCQ FBS for now.
1608 */
1609 want_fbs &= want_ncq;
1610
08da1759 1611 mv_config_fbs(ap, want_ncq, want_fbs);
00f42eab
ML
1612
1613 if (want_fbs) {
1614 pp->pp_flags |= MV_PP_FLAG_FBS_EN;
1615 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1616 }
1617
e728eabe 1618 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
00b81235
ML
1619 if (want_edma) {
1620 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1621 if (!IS_SOC(hpriv))
1622 cfg |= (1 << 18); /* enab early completion */
1623 }
616d4a98
ML
1624 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
1625 cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
c01e8a23 1626 mv_bmdma_enable_iie(ap, !want_edma);
000b344f
ML
1627
1628 if (IS_SOC(hpriv)) {
1629 if (want_ncq)
1630 mv_soc_led_blink_enable(ap);
1631 else
1632 mv_soc_led_blink_disable(ap);
1633 }
e4e7b892
JG
1634 }
1635
72109168
ML
1636 if (want_ncq) {
1637 cfg |= EDMA_CFG_NCQ;
1638 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
00b81235 1639 }
72109168 1640
cae5a29d 1641 writelfl(cfg, port_mmio + EDMA_CFG);
e4e7b892
JG
1642}
1643
da2fa9ba
ML
1644static void mv_port_free_dma_mem(struct ata_port *ap)
1645{
1646 struct mv_host_priv *hpriv = ap->host->private_data;
1647 struct mv_port_priv *pp = ap->private_data;
eb73d558 1648 int tag;
da2fa9ba
ML
1649
1650 if (pp->crqb) {
1651 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1652 pp->crqb = NULL;
1653 }
1654 if (pp->crpb) {
1655 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1656 pp->crpb = NULL;
1657 }
eb73d558
ML
1658 /*
1659 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1660 * For later hardware, we have one unique sg_tbl per NCQ tag.
1661 */
1662 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1663 if (pp->sg_tbl[tag]) {
1664 if (tag == 0 || !IS_GEN_I(hpriv))
1665 dma_pool_free(hpriv->sg_tbl_pool,
1666 pp->sg_tbl[tag],
1667 pp->sg_tbl_dma[tag]);
1668 pp->sg_tbl[tag] = NULL;
1669 }
da2fa9ba
ML
1670 }
1671}
1672
05b308e1
BR
1673/**
1674 * mv_port_start - Port specific init/start routine.
1675 * @ap: ATA channel to manipulate
1676 *
1677 * Allocate and point to DMA memory, init port private memory,
1678 * zero indices.
1679 *
1680 * LOCKING:
1681 * Inherited from caller.
1682 */
31961943
BR
1683static int mv_port_start(struct ata_port *ap)
1684{
cca3974e
JG
1685 struct device *dev = ap->host->dev;
1686 struct mv_host_priv *hpriv = ap->host->private_data;
31961943 1687 struct mv_port_priv *pp;
933cb8e5 1688 unsigned long flags;
dde20207 1689 int tag;
31961943 1690
24dc5f33 1691 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
6037d6bb 1692 if (!pp)
24dc5f33 1693 return -ENOMEM;
da2fa9ba 1694 ap->private_data = pp;
31961943 1695
da2fa9ba
ML
1696 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1697 if (!pp->crqb)
1698 return -ENOMEM;
1699 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
31961943 1700
da2fa9ba
ML
1701 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1702 if (!pp->crpb)
1703 goto out_port_free_dma_mem;
1704 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
31961943 1705
3bd0a70e
ML
1706 /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
1707 if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
1708 ap->flags |= ATA_FLAG_AN;
eb73d558
ML
1709 /*
1710 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1711 * For later hardware, we need one unique sg_tbl per NCQ tag.
1712 */
1713 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1714 if (tag == 0 || !IS_GEN_I(hpriv)) {
1715 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1716 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1717 if (!pp->sg_tbl[tag])
1718 goto out_port_free_dma_mem;
1719 } else {
1720 pp->sg_tbl[tag] = pp->sg_tbl[0];
1721 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1722 }
1723 }
933cb8e5
ML
1724
1725 spin_lock_irqsave(ap->lock, flags);
08da1759 1726 mv_save_cached_regs(ap);
66e57a2c 1727 mv_edma_cfg(ap, 0, 0);
933cb8e5
ML
1728 spin_unlock_irqrestore(ap->lock, flags);
1729
31961943 1730 return 0;
da2fa9ba
ML
1731
1732out_port_free_dma_mem:
1733 mv_port_free_dma_mem(ap);
1734 return -ENOMEM;
31961943
BR
1735}
1736
05b308e1
BR
1737/**
1738 * mv_port_stop - Port specific cleanup/stop routine.
1739 * @ap: ATA channel to manipulate
1740 *
1741 * Stop DMA, cleanup port memory.
1742 *
1743 * LOCKING:
cca3974e 1744 * This routine uses the host lock to protect the DMA stop.
05b308e1 1745 */
31961943
BR
1746static void mv_port_stop(struct ata_port *ap)
1747{
933cb8e5
ML
1748 unsigned long flags;
1749
1750 spin_lock_irqsave(ap->lock, flags);
e12bef50 1751 mv_stop_edma(ap);
88e675e1 1752 mv_enable_port_irqs(ap, 0);
933cb8e5 1753 spin_unlock_irqrestore(ap->lock, flags);
da2fa9ba 1754 mv_port_free_dma_mem(ap);
31961943
BR
1755}
1756
05b308e1
BR
1757/**
1758 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1759 * @qc: queued command whose SG list to source from
1760 *
1761 * Populate the SG list and mark the last entry.
1762 *
1763 * LOCKING:
1764 * Inherited from caller.
1765 */
6c08772e 1766static void mv_fill_sg(struct ata_queued_cmd *qc)
31961943
BR
1767{
1768 struct mv_port_priv *pp = qc->ap->private_data;
972c26bd 1769 struct scatterlist *sg;
3be6cbd7 1770 struct mv_sg *mv_sg, *last_sg = NULL;
ff2aeb1e 1771 unsigned int si;
31961943 1772
eb73d558 1773 mv_sg = pp->sg_tbl[qc->tag];
ff2aeb1e 1774 for_each_sg(qc->sg, sg, qc->n_elem, si) {
d88184fb
JG
1775 dma_addr_t addr = sg_dma_address(sg);
1776 u32 sg_len = sg_dma_len(sg);
22374677 1777
4007b493
OJ
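		/*
		 * Split the segment so that each ePRD ends at the next 64KB
		 * boundary; the ePRD length field is only 16 bits wide.
		 */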
1778 while (sg_len) {
1779 u32 offset = addr & 0xffff;
1780 u32 len = sg_len;
22374677 1781
32cd11a6 1782 if (offset + len > 0x10000)
4007b493
OJ
1783 len = 0x10000 - offset;
1784
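			/*
			 * Store the low 32 address bits, then the upper 32
			 * via two 16-bit shifts (well-defined even when
			 * dma_addr_t is only 32 bits wide).
			 */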
1785 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1786 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
6c08772e 1787 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
32cd11a6 1788 mv_sg->reserved = 0;
4007b493
OJ
1789
1790 sg_len -= len;
1791 addr += len;
1792
3be6cbd7 1793 last_sg = mv_sg;
4007b493 1794 mv_sg++;
4007b493 1795 }
31961943 1796 }
3be6cbd7
JG
1797
1798 if (likely(last_sg))
1799 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
32cd11a6 1800 mb(); /* ensure data structure is visible to the chipset */
31961943
BR
1801}
1802
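/*
 * mv_crqb_pack_cmd - pack one shadow-register write into a CRQB word
 *
 * Each 16-bit CRQB command word carries an 8-bit data value, the target
 * ATA register address, and control bits; the final word of a command
 * sequence is flagged with CRQB_CMD_LAST.
 */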
5796d1c4 1803static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
31961943 1804{
559eedad 1805 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
31961943 1806 (last ? CRQB_CMD_LAST : 0);
559eedad 1807 *cmdw = cpu_to_le16(tmp);
31961943
BR
1808}
1809
da14265e
ML
1810/**
1811 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
1812 * @ap: Port associated with this ATA transaction.
1813 *
1814 * We need this only for ATAPI bmdma transactions,
1815 * as otherwise we experience spurious interrupts
1816 * after libata-sff handles the bmdma interrupts.
1817 */
1818static void mv_sff_irq_clear(struct ata_port *ap)
1819{
1820 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
1821}
1822
1823/**
1824 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
1825 * @qc: queued command to check for chipset/DMA compatibility.
1826 *
1827 * The bmdma engines cannot handle speculative data sizes
1828 * (bytecount under/over flow). So only allow DMA for
1829 * data transfer commands with known data sizes.
1830 *
1831 * LOCKING:
1832 * Inherited from caller.
1833 */
1834static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
1835{
1836 struct scsi_cmnd *scmd = qc->scsicmd;
1837
1838 if (scmd) {
1839 switch (scmd->cmnd[0]) {
1840 case READ_6:
1841 case READ_10:
1842 case READ_12:
1843 case WRITE_6:
1844 case WRITE_10:
1845 case WRITE_12:
1846 case GPCMD_READ_CD:
1847 case GPCMD_SEND_DVD_STRUCTURE:
1848 case GPCMD_SEND_CUE_SHEET:
1849 return 0; /* DMA is safe */
1850 }
1851 }
1852 return -EOPNOTSUPP; /* use PIO instead */
1853}
1854
1855/**
1856 * mv_bmdma_setup - Set up BMDMA transaction
1857 * @qc: queued command to prepare DMA for.
1858 *
1859 * LOCKING:
1860 * Inherited from caller.
1861 */
1862static void mv_bmdma_setup(struct ata_queued_cmd *qc)
1863{
1864 struct ata_port *ap = qc->ap;
1865 void __iomem *port_mmio = mv_ap_base(ap);
1866 struct mv_port_priv *pp = ap->private_data;
1867
1868 mv_fill_sg(qc);
1869
1870 /* clear all DMA cmd bits */
cae5a29d 1871 writel(0, port_mmio + BMDMA_CMD);
da14265e
ML
1872
1873 /* load PRD table addr. */
1874 writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
cae5a29d 1875 port_mmio + BMDMA_PRD_HIGH);
da14265e 1876 writelfl(pp->sg_tbl_dma[qc->tag],
cae5a29d 1877 port_mmio + BMDMA_PRD_LOW);
da14265e
ML
1878
1879 /* issue r/w command */
1880 ap->ops->sff_exec_command(ap, &qc->tf);
1881}
1882
1883/**
1884 * mv_bmdma_start - Start a BMDMA transaction
1885 * @qc: queued command to start DMA on.
1886 *
1887 * LOCKING:
1888 * Inherited from caller.
1889 */
1890static void mv_bmdma_start(struct ata_queued_cmd *qc)
1891{
1892 struct ata_port *ap = qc->ap;
1893 void __iomem *port_mmio = mv_ap_base(ap);
1894 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
1895 u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
1896
1897 /* start host DMA transaction */
cae5a29d 1898 writelfl(cmd, port_mmio + BMDMA_CMD);
da14265e
ML
1899}
1900
1901/**
1902 * mv_bmdma_stop_ap - Stop BMDMA transfer
1903 * @ap: port on which to stop DMA.
1904 *
1905 * Clears the ATA_DMA_START flag in the bmdma control register
1906 *
1907 * LOCKING:
1908 * Inherited from caller.
1909 */
44b73380 1910static void mv_bmdma_stop_ap(struct ata_port *ap)
da14265e 1911{
da14265e
ML
1912 void __iomem *port_mmio = mv_ap_base(ap);
1913 u32 cmd;
1914
1915 /* clear start/stop bit */
cae5a29d 1916 cmd = readl(port_mmio + BMDMA_CMD);
44b73380
ML
1917 if (cmd & ATA_DMA_START) {
1918 cmd &= ~ATA_DMA_START;
1919 writelfl(cmd, port_mmio + BMDMA_CMD);
1920
1921 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
1922 ata_sff_dma_pause(ap);
1923 }
1924}
da14265e 1925
44b73380
ML
1926static void mv_bmdma_stop(struct ata_queued_cmd *qc)
1927{
1928 mv_bmdma_stop_ap(qc->ap);
da14265e
ML
1929}
1930
1931/**
1932 * mv_bmdma_status - Read BMDMA status
1933 * @ap: port for which to retrieve DMA status.
1934 *
1935 * Read and return equivalent of the sff BMDMA status register.
1936 *
1937 * LOCKING:
1938 * Inherited from caller.
1939 */
1940static u8 mv_bmdma_status(struct ata_port *ap)
1941{
1942 void __iomem *port_mmio = mv_ap_base(ap);
1943 u32 reg, status;
1944
1945 /*
1946 * On this hardware the ATA_DMA_INTR bit does not exist, and the
1947 * other status bits are valid only while ATA_DMA_ACTIVE==0.
1948 */
cae5a29d 1949 reg = readl(port_mmio + BMDMA_STATUS);
da14265e
ML
1950 if (reg & ATA_DMA_ACTIVE)
1951 status = ATA_DMA_ACTIVE;
44b73380 1952 else if (reg & ATA_DMA_ERR)
da14265e 1953 status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
44b73380
ML
1954 else {
1955 /*
1956 * Just because DMA_ACTIVE is 0 (DMA completed),
1957 * this does _not_ mean the device is "done".
1958 * So we should not yet be signalling ATA_DMA_INTR
1959 * in some cases. Eg. DSM/TRIM, and perhaps others.
1960 */
1961 mv_bmdma_stop_ap(ap);
1962 if (ioread8(ap->ioaddr.altstatus_addr) & ATA_BUSY)
1963 status = 0;
1964 else
1965 status = ATA_DMA_INTR;
1966 }
da14265e
ML
1967 return status;
1968}
1969
299b3f8d
ML
1970static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1971{
1972 struct ata_taskfile *tf = &qc->tf;
1973 /*
1974 * Workaround for 88SX60x1 FEr SATA#24.
1975 *
1976 * Chip may corrupt WRITEs if multi_count >= 4kB.
1977 * Note that READs are unaffected.
1978 *
1979 * It's not clear whether this erratum really means "4K bytes",
1980 * or whether it always happens for multi_count > 7
1981 * regardless of device sector_size.
1982 *
1983 * So, for safety, any write with multi_count > 7
1984 * gets converted here into a regular PIO write instead:
1985 */
1986 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
1987 if (qc->dev->multi_count > 7) {
1988 switch (tf->command) {
1989 case ATA_CMD_WRITE_MULTI:
1990 tf->command = ATA_CMD_PIO_WRITE;
1991 break;
1992 case ATA_CMD_WRITE_MULTI_FUA_EXT:
1993 tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
1994 /* fall through */
1995 case ATA_CMD_WRITE_MULTI_EXT:
1996 tf->command = ATA_CMD_PIO_WRITE_EXT;
1997 break;
1998 }
1999 }
2000 }
2001}
2002
05b308e1
BR
2003/**
2004 * mv_qc_prep - Host specific command preparation.
2005 * @qc: queued command to prepare
2006 *
2007 * This routine simply redirects to the general purpose routine
2008 * if command is not DMA. Else, it handles prep of the CRQB
2009 * (command request block), does some sanity checking, and calls
2010 * the SG load routine.
2011 *
2012 * LOCKING:
2013 * Inherited from caller.
2014 */
31961943
BR
2015static void mv_qc_prep(struct ata_queued_cmd *qc)
2016{
2017 struct ata_port *ap = qc->ap;
2018 struct mv_port_priv *pp = ap->private_data;
e1469874 2019 __le16 *cw;
8d2b450d 2020 struct ata_taskfile *tf = &qc->tf;
31961943 2021 u16 flags = 0;
a6432436 2022 unsigned in_index;
31961943 2023
299b3f8d
ML
2024 switch (tf->protocol) {
2025 case ATA_PROT_DMA:
44b73380
ML
2026 if (tf->command == ATA_CMD_DSM)
2027 return;
2028 /* fall-thru */
299b3f8d
ML
2029 case ATA_PROT_NCQ:
2030 break; /* continue below */
2031 case ATA_PROT_PIO:
2032 mv_rw_multi_errata_sata24(qc);
31961943 2033 return;
299b3f8d
ML
2034 default:
2035 return;
2036 }
20f733e7 2037
31961943
BR
2038 /* Fill in command request block
2039 */
8d2b450d 2040 if (!(tf->flags & ATA_TFLAG_WRITE))
31961943 2041 flags |= CRQB_FLAG_READ;
beec7dbc 2042 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
31961943 2043 flags |= qc->tag << CRQB_TAG_SHIFT;
e49856d8 2044 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
31961943 2045
bdd4ddde 2046 /* get current queue index from software */
fcfb1f77 2047 in_index = pp->req_idx;
a6432436
ML
2048
2049 pp->crqb[in_index].sg_addr =
eb73d558 2050 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
a6432436 2051 pp->crqb[in_index].sg_addr_hi =
eb73d558 2052 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
a6432436 2053 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
31961943 2054
a6432436 2055 cw = &pp->crqb[in_index].ata_cmd[0];
31961943 2056
25985edc 2057 /* Sadly, the CRQB cannot accommodate all registers--there are
31961943
BR
2058 * only 11 bytes...so we must pick and choose required
2059 * registers based on the command. So, we drop feature and
2060 * hob_feature for [RW] DMA commands, but they are needed for
cd12e1f7
ML
2061 * NCQ. NCQ will drop hob_nsect, which is not needed there
2062 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
20f733e7 2063 */
31961943
BR
2064 switch (tf->command) {
2065 case ATA_CMD_READ:
2066 case ATA_CMD_READ_EXT:
2067 case ATA_CMD_WRITE:
2068 case ATA_CMD_WRITE_EXT:
c15d85c8 2069 case ATA_CMD_WRITE_FUA_EXT:
31961943
BR
2070 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2071 break;
31961943
BR
2072 case ATA_CMD_FPDMA_READ:
2073 case ATA_CMD_FPDMA_WRITE:
8b260248 2074 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
31961943
BR
2075 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2076 break;
31961943
BR
2077 default:
2078 /* The only other commands EDMA supports in non-queued and
2079 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2080 * of which are defined/used by Linux. If we get here, this
2081 * driver needs work.
2082 *
2083 * FIXME: modify libata to give qc_prep a return value and
2084 * return error here.
2085 */
2086 BUG_ON(tf->command);
2087 break;
2088 }
2089 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2090 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2091 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2092 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2093 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2094 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2095 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2096 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2097 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
2098
e4e7b892
JG
2099 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2100 return;
2101 mv_fill_sg(qc);
2102}
2103
2104/**
2105 * mv_qc_prep_iie - Host specific command preparation.
2106 * @qc: queued command to prepare
2107 *
2108 * This routine simply redirects to the general purpose routine
2109 * if command is not DMA. Else, it handles prep of the CRQB
2110 * (command request block), does some sanity checking, and calls
2111 * the SG load routine.
2112 *
2113 * LOCKING:
2114 * Inherited from caller.
2115 */
2116static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2117{
2118 struct ata_port *ap = qc->ap;
2119 struct mv_port_priv *pp = ap->private_data;
2120 struct mv_crqb_iie *crqb;
8d2b450d 2121 struct ata_taskfile *tf = &qc->tf;
a6432436 2122 unsigned in_index;
e4e7b892
JG
2123 u32 flags = 0;
2124
8d2b450d
ML
2125 if ((tf->protocol != ATA_PROT_DMA) &&
2126 (tf->protocol != ATA_PROT_NCQ))
e4e7b892 2127 return;
44b73380
ML
2128 if (tf->command == ATA_CMD_DSM)
2129 return; /* use bmdma for this */
e4e7b892 2130
e12bef50 2131 /* Fill in Gen IIE command request block */
8d2b450d 2132 if (!(tf->flags & ATA_TFLAG_WRITE))
e4e7b892
JG
2133 flags |= CRQB_FLAG_READ;
2134
beec7dbc 2135 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
e4e7b892 2136 flags |= qc->tag << CRQB_TAG_SHIFT;
8c0aeb4a 2137 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
e49856d8 2138 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
e4e7b892 2139
bdd4ddde 2140 /* get current queue index from software */
fcfb1f77 2141 in_index = pp->req_idx;
a6432436
ML
2142
2143 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
eb73d558
ML
2144 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2145 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
e4e7b892
JG
2146 crqb->flags = cpu_to_le32(flags);
2147
e4e7b892
JG
2148 crqb->ata_cmd[0] = cpu_to_le32(
2149 (tf->command << 16) |
2150 (tf->feature << 24)
2151 );
2152 crqb->ata_cmd[1] = cpu_to_le32(
2153 (tf->lbal << 0) |
2154 (tf->lbam << 8) |
2155 (tf->lbah << 16) |
2156 (tf->device << 24)
2157 );
2158 crqb->ata_cmd[2] = cpu_to_le32(
2159 (tf->hob_lbal << 0) |
2160 (tf->hob_lbam << 8) |
2161 (tf->hob_lbah << 16) |
2162 (tf->hob_feature << 24)
2163 );
2164 crqb->ata_cmd[3] = cpu_to_le32(
2165 (tf->nsect << 0) |
2166 (tf->hob_nsect << 8)
2167 );
2168
2169 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
31961943 2170 return;
31961943
BR
2171 mv_fill_sg(qc);
2172}
2173
d16ab3f6
ML
2174/**
2175 * mv_sff_check_status - fetch device status, if valid
2176 * @ap: ATA port to fetch status from
2177 *
2178 * When using command issue via mv_qc_issue_fis(),
2179 * the initial ATA_BUSY state does not show up in the
2180 * ATA status (shadow) register. This can confuse libata!
2181 *
2182 * So we have a hook here to fake ATA_BUSY for that situation,
2183 * until the first time a BUSY, DRQ, or ERR bit is seen.
2184 *
2185 * The rest of the time, it simply returns the ATA status register.
2186 */
2187static u8 mv_sff_check_status(struct ata_port *ap)
2188{
2189 u8 stat = ioread8(ap->ioaddr.status_addr);
2190 struct mv_port_priv *pp = ap->private_data;
2191
2192 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2193 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2194 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
2195 else
2196 stat = ATA_BUSY;
2197 }
2198 return stat;
2199}
2200
70f8b79c
ML
2201/**
2202 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2203 * @fis: fis to be sent
2204 * @nwords: number of 32-bit words in the fis
2205 */
2206static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2207{
2208 void __iomem *port_mmio = mv_ap_base(ap);
2209 u32 ifctl, old_ifctl, ifstat;
2210 int i, timeout = 200, final_word = nwords - 1;
2211
2212 /* Initiate FIS transmission mode */
cae5a29d 2213 old_ifctl = readl(port_mmio + SATA_IFCTL);
70f8b79c 2214 ifctl = 0x100 | (old_ifctl & 0xf);
cae5a29d 2215 writelfl(ifctl, port_mmio + SATA_IFCTL);
70f8b79c
ML
2216
2217 /* Send all words of the FIS except for the final word */
2218 for (i = 0; i < final_word; ++i)
cae5a29d 2219 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
70f8b79c
ML
2220
2221 /* Flag end-of-transmission, and then send the final word */
cae5a29d
ML
2222 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2223 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
70f8b79c
ML
2224
2225 /*
2226 * Wait for FIS transmission to complete.
2227 * This typically takes just a single iteration.
2228 */
2229 do {
cae5a29d 2230 ifstat = readl(port_mmio + SATA_IFSTAT);
70f8b79c
ML
2231 } while (!(ifstat & 0x1000) && --timeout);
2232
2233 /* Restore original port configuration */
cae5a29d 2234 writelfl(old_ifctl, port_mmio + SATA_IFCTL);
70f8b79c
ML
2235
2236 /* See if it worked: done (bit 12) set, with no error (bit 13) */
2237 if ((ifstat & 0x3000) != 0x1000) {
a9a79dfe
JP
2238 ata_port_warn(ap, "%s transmission error, ifstat=%08x\n",
2239 __func__, ifstat);
70f8b79c
ML
2240 return AC_ERR_OTHER;
2241 }
2242 return 0;
2243}
2244
2245/**
2246 * mv_qc_issue_fis - Issue a command directly as a FIS
2247 * @qc: queued command to start
2248 *
2249 * Note that the ATA shadow registers are not updated
2250 * after command issue, so the device will appear "READY"
2251 * if polled, even while it is BUSY processing the command.
2252 *
2253 * So we use a status hook to fake ATA_BUSY until the drive changes state.
2254 *
2255 * Note: we don't get updated shadow regs on *completion*
2256 * of non-data commands. So avoid sending them via this function,
2257 * as they will appear to have completed immediately.
2258 *
2259 * GEN_IIE has special registers that we could get the result tf from,
2260 * but earlier chipsets do not. For now, we ignore those registers.
2261 */
2262static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2263{
2264 struct ata_port *ap = qc->ap;
2265 struct mv_port_priv *pp = ap->private_data;
2266 struct ata_link *link = qc->dev->link;
2267 u32 fis[5];
2268 int err = 0;
2269
2270 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
4c4a90fd 2271 err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
70f8b79c
ML
2272 if (err)
2273 return err;
2274
2275 switch (qc->tf.protocol) {
2276 case ATAPI_PROT_PIO:
2277 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2278 /* fall through */
2279 case ATAPI_PROT_NODATA:
2280 ap->hsm_task_state = HSM_ST_FIRST;
2281 break;
2282 case ATA_PROT_PIO:
2283 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2284 if (qc->tf.flags & ATA_TFLAG_WRITE)
2285 ap->hsm_task_state = HSM_ST_FIRST;
2286 else
2287 ap->hsm_task_state = HSM_ST;
2288 break;
2289 default:
2290 ap->hsm_task_state = HSM_ST_LAST;
2291 break;
2292 }
2293
2294 if (qc->tf.flags & ATA_TFLAG_POLLING)
ea3c6450 2295 ata_sff_queue_pio_task(link, 0);
70f8b79c
ML
2296 return 0;
2297}
2298
05b308e1
BR
2299/**
2300 * mv_qc_issue - Initiate a command to the host
2301 * @qc: queued command to start
2302 *
2303 * This routine simply redirects to the general purpose routine
2304 * if command is not DMA. Else, it sanity checks our local
2305 * caches of the request producer/consumer indices then enables
2306 * DMA and bumps the request producer index.
2307 *
2308 * LOCKING:
2309 * Inherited from caller.
2310 */
9a3d9eb0 2311static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
31961943 2312{
f48765cc 2313 static int limit_warnings = 10;
c5d3e45a
JG
2314 struct ata_port *ap = qc->ap;
2315 void __iomem *port_mmio = mv_ap_base(ap);
2316 struct mv_port_priv *pp = ap->private_data;
bdd4ddde 2317 u32 in_index;
42ed893d 2318 unsigned int port_irqs;
f48765cc 2319
d16ab3f6
ML
2320 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2321
f48765cc
ML
2322 switch (qc->tf.protocol) {
2323 case ATA_PROT_DMA:
44b73380
ML
2324 if (qc->tf.command == ATA_CMD_DSM) {
2325 if (!ap->ops->bmdma_setup) /* no bmdma on GEN_I */
2326 return AC_ERR_OTHER;
2327 break; /* use bmdma for this */
2328 }
2329 /* fall thru */
f48765cc
ML
2330 case ATA_PROT_NCQ:
2331 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2332 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2333 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2334
2335 /* Write the request in pointer to kick the EDMA to life */
2336 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
cae5a29d 2337 port_mmio + EDMA_REQ_Q_IN_PTR);
f48765cc 2338 return 0;
31961943 2339
f48765cc 2340 case ATA_PROT_PIO:
c6112bd8
ML
2341 /*
2342 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2343 *
2344 * Someday, we might implement special polling workarounds
2345 * for these, but it all seems rather unnecessary since we
2346 * normally use only DMA for commands which transfer more
2347 * than a single block of data.
2348 *
2349 * Much of the time, this could just work regardless.
2350 * So for now, just log the incident, and allow the attempt.
2351 */
c7843e8f 2352 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
c6112bd8 2353 --limit_warnings;
a9a79dfe
JP
2354 ata_link_warn(qc->dev->link, DRV_NAME
2355 ": attempting PIO w/multiple DRQ: "
2356 "this may fail due to h/w errata\n");
c6112bd8 2357 }
f48765cc 2358 /* drop through */
42ed893d 2359 case ATA_PROT_NODATA:
f48765cc 2360 case ATAPI_PROT_PIO:
42ed893d
ML
2361 case ATAPI_PROT_NODATA:
2362 if (ap->flags & ATA_FLAG_PIO_POLLING)
2363 qc->tf.flags |= ATA_TFLAG_POLLING;
2364 break;
31961943 2365 }
42ed893d
ML
2366
2367 if (qc->tf.flags & ATA_TFLAG_POLLING)
2368 port_irqs = ERR_IRQ; /* mask device interrupt when polling */
2369 else
2370 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2371
2372 /*
2373 * We're about to send a non-EDMA capable command to the
2374 * port. Turn off EDMA so there won't be problems accessing
2375 * shadow block, etc registers.
2376 */
2377 mv_stop_edma(ap);
2378 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2379 mv_pmp_select(ap, qc->dev->link->pmp);
70f8b79c
ML
2380
2381 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2382 struct mv_host_priv *hpriv = ap->host->private_data;
2383 /*
2384 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
40f21b11 2385 *
70f8b79c
ML
2386 * After any NCQ error, the READ_LOG_EXT command
2387 * from libata-eh *must* use mv_qc_issue_fis().
2388 * Otherwise it might fail, due to chip errata.
2389 *
2390 * Rather than special-case it, we'll just *always*
2391 * use this method here for READ_LOG_EXT, making for
2392 * easier testing.
2393 */
2394 if (IS_GEN_II(hpriv))
2395 return mv_qc_issue_fis(qc);
2396 }
360ff783 2397 return ata_bmdma_qc_issue(qc);
31961943
BR
2398}
2399
8f767f8a
ML
2400static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2401{
2402 struct mv_port_priv *pp = ap->private_data;
2403 struct ata_queued_cmd *qc;
2404
2405 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2406 return NULL;
2407 qc = ata_qc_from_tag(ap, ap->link.active_tag);
3e4ec344
TH
2408 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2409 return qc;
2410 return NULL;
8f767f8a
ML
2411}
2412
29d187bb
ML
2413static void mv_pmp_error_handler(struct ata_port *ap)
2414{
2415 unsigned int pmp, pmp_map;
2416 struct mv_port_priv *pp = ap->private_data;
2417
2418 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2419 /*
2420 * Perform NCQ error analysis on failed PMPs
2421 * before we freeze the port entirely.
2422 *
2423 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2424 */
2425 pmp_map = pp->delayed_eh_pmp_map;
2426 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2427 for (pmp = 0; pmp_map != 0; pmp++) {
2428 unsigned int this_pmp = (1 << pmp);
2429 if (pmp_map & this_pmp) {
2430 struct ata_link *link = &ap->pmp_link[pmp];
2431 pmp_map &= ~this_pmp;
2432 ata_eh_analyze_ncq_error(link);
2433 }
2434 }
2435 ata_port_freeze(ap);
2436 }
2437 sata_pmp_error_handler(ap);
2438}
2439
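/*
 * The upper 16 bits of SATA_TESTCTL hold a bitmap of PMP links that
 * have reported device errors; used by the FBS/NCQ error handling.
 */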
4c299ca3
ML
2440static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2441{
2442 void __iomem *port_mmio = mv_ap_base(ap);
2443
cae5a29d 2444 return readl(port_mmio + SATA_TESTCTL) >> 16;
4c299ca3
ML
2445}
2446
4c299ca3
ML
2447static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2448{
2449 struct ata_eh_info *ehi;
2450 unsigned int pmp;
2451
2452 /*
2453 * Initialize EH info for PMPs which saw device errors
2454 */
2455 ehi = &ap->link.eh_info;
2456 for (pmp = 0; pmp_map != 0; pmp++) {
2457 unsigned int this_pmp = (1 << pmp);
2458 if (pmp_map & this_pmp) {
2459 struct ata_link *link = &ap->pmp_link[pmp];
2460
2461 pmp_map &= ~this_pmp;
2462 ehi = &link->eh_info;
2463 ata_ehi_clear_desc(ehi);
2464 ata_ehi_push_desc(ehi, "dev err");
2465 ehi->err_mask |= AC_ERR_DEV;
2466 ehi->action |= ATA_EH_RESET;
2467 ata_link_abort(link);
2468 }
2469 }
2470}
2471
06aaca3f
ML
2472static int mv_req_q_empty(struct ata_port *ap)
2473{
2474 void __iomem *port_mmio = mv_ap_base(ap);
2475 u32 in_ptr, out_ptr;
2476
cae5a29d 2477 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
06aaca3f 2478 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
cae5a29d 2479 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
06aaca3f
ML
2480 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2481 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
2482}
2483
4c299ca3
ML
2484static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2485{
2486 struct mv_port_priv *pp = ap->private_data;
2487 int failed_links;
2488 unsigned int old_map, new_map;
2489
2490 /*
2491 * Device error during FBS+NCQ operation:
2492 *
2493 * Set a port flag to prevent further I/O being enqueued.
2494 * Leave the EDMA running to drain outstanding commands from this port.
2495 * Perform the post-mortem/EH only when all responses are complete.
2496 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2497 */
2498 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2499 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2500 pp->delayed_eh_pmp_map = 0;
2501 }
2502 old_map = pp->delayed_eh_pmp_map;
2503 new_map = old_map | mv_get_err_pmp_map(ap);
2504
2505 if (old_map != new_map) {
2506 pp->delayed_eh_pmp_map = new_map;
2507 mv_pmp_eh_prep(ap, new_map & ~old_map);
2508 }
c46938cc 2509 failed_links = hweight16(new_map);
4c299ca3 2510
a9a79dfe
JP
2511 ata_port_info(ap,
2512 "%s: pmp_map=%04x qc_map=%04x failed_links=%d nr_active_links=%d\n",
2513 __func__, pp->delayed_eh_pmp_map,
2514 ap->qc_active, failed_links,
2515 ap->nr_active_links);
4c299ca3 2516
06aaca3f 2517 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
4c299ca3
ML
2518 mv_process_crpb_entries(ap, pp);
2519 mv_stop_edma(ap);
2520 mv_eh_freeze(ap);
a9a79dfe 2521 ata_port_info(ap, "%s: done\n", __func__);
4c299ca3
ML
2522 return 1; /* handled */
2523 }
a9a79dfe 2524 ata_port_info(ap, "%s: waiting\n", __func__);
4c299ca3
ML
2525 return 1; /* handled */
2526}
2527
2528static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2529{
2530 /*
2531 * Possible future enhancement:
2532 *
2533 * FBS+non-NCQ operation is not yet implemented.
2534 * See related notes in mv_edma_cfg().
2535 *
2536 * Device error during FBS+non-NCQ operation:
2537 *
2538 * We need to snapshot the shadow registers for each failed command.
2539 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2540 */
2541 return 0; /* not handled */
2542}
2543
2544static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2545{
2546 struct mv_port_priv *pp = ap->private_data;
2547
2548 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2549 return 0; /* EDMA was not active: not handled */
2550 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2551 return 0; /* FBS was not active: not handled */
2552
2553 if (!(edma_err_cause & EDMA_ERR_DEV))
2554 return 0; /* non DEV error: not handled */
2555 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2556 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2557 return 0; /* other problems: not handled */
2558
2559 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2560 /*
2561 * EDMA should NOT have self-disabled for this case.
2562 * If it did, then something is wrong elsewhere,
2563 * and we cannot handle it here.
2564 */
2565 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
a9a79dfe
JP
2566 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2567 __func__, edma_err_cause, pp->pp_flags);
4c299ca3
ML
2568 return 0; /* not handled */
2569 }
2570 return mv_handle_fbs_ncq_dev_err(ap);
2571 } else {
2572 /*
2573 * EDMA should have self-disabled for this case.
2574 * If it did not, then something is wrong elsewhere,
2575 * and we cannot handle it here.
2576 */
2577 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
a9a79dfe
JP
2578 ata_port_warn(ap, "%s: err_cause=0x%x pp_flags=0x%x\n",
2579 __func__, edma_err_cause, pp->pp_flags);
4c299ca3
ML
2580 return 0; /* not handled */
2581 }
2582 return mv_handle_fbs_non_ncq_dev_err(ap);
2583 }
2584 return 0; /* not handled */
2585}
2586
a9010329 2587static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
8f767f8a 2588{
8f767f8a 2589 struct ata_eh_info *ehi = &ap->link.eh_info;
a9010329 2590 char *when = "idle";
8f767f8a 2591
8f767f8a 2592 ata_ehi_clear_desc(ehi);
3e4ec344 2593 if (edma_was_enabled) {
a9010329 2594 when = "EDMA enabled";
8f767f8a
ML
2595 } else {
2596 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2597 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
a9010329 2598 when = "polling";
8f767f8a 2599 }
a9010329 2600 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
8f767f8a
ML
2601 ehi->err_mask |= AC_ERR_OTHER;
2602 ehi->action |= ATA_EH_RESET;
2603 ata_port_freeze(ap);
2604}
2605
05b308e1
BR
2606/**
2607 * mv_err_intr - Handle error interrupts on the port
2608 * @ap: ATA channel to manipulate
2609 *
8d07379d
ML
2610 * Most cases require a full reset of the chip's state machine,
2611 * which also performs a COMRESET.
2612 * Also, if the port disabled DMA, update our cached copy to match.
05b308e1
BR
2613 *
2614 * LOCKING:
2615 * Inherited from caller.
2616 */
37b9046a 2617static void mv_err_intr(struct ata_port *ap)
31961943
BR
2618{
2619 void __iomem *port_mmio = mv_ap_base(ap);
bdd4ddde 2620 u32 edma_err_cause, eh_freeze_mask, serr = 0;
e4006077 2621 u32 fis_cause = 0;
bdd4ddde
JG
2622 struct mv_port_priv *pp = ap->private_data;
2623 struct mv_host_priv *hpriv = ap->host->private_data;
bdd4ddde 2624 unsigned int action = 0, err_mask = 0;
9af5c9c9 2625 struct ata_eh_info *ehi = &ap->link.eh_info;
37b9046a
ML
2626 struct ata_queued_cmd *qc;
2627 int abort = 0;
20f733e7 2628
8d07379d 2629 /*
37b9046a 2630 * Read and clear the SError and err_cause bits.
e4006077
ML
2631 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2632 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
8d07379d 2633 */
37b9046a
ML
2634 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2635 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2636
cae5a29d 2637 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
e4006077 2638 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
cae5a29d
ML
2639 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2640 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
e4006077 2641 }
cae5a29d 2642 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
bdd4ddde 2643
4c299ca3
ML
2644 if (edma_err_cause & EDMA_ERR_DEV) {
2645 /*
2646 * Device errors during FIS-based switching operation
2647 * require special handling.
2648 */
2649 if (mv_handle_dev_err(ap, edma_err_cause))
2650 return;
2651 }
2652
37b9046a
ML
2653 qc = mv_get_active_qc(ap);
2654 ata_ehi_clear_desc(ehi);
2655 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2656 edma_err_cause, pp->pp_flags);
e4006077 2657
c443c500 2658 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
e4006077 2659 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
cae5a29d 2660 if (fis_cause & FIS_IRQ_CAUSE_AN) {
c443c500
ML
2661 u32 ec = edma_err_cause &
2662 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2663 sata_async_notification(ap);
2664 if (!ec)
2665 return; /* Just an AN; no need for the nukes */
2666 ata_ehi_push_desc(ehi, "SDB notify");
2667 }
2668 }
bdd4ddde 2669 /*
352fab70 2670 * All generations share these EDMA error cause bits:
bdd4ddde 2671 */
37b9046a 2672 if (edma_err_cause & EDMA_ERR_DEV) {
bdd4ddde 2673 err_mask |= AC_ERR_DEV;
37b9046a
ML
2674 action |= ATA_EH_RESET;
2675 ata_ehi_push_desc(ehi, "dev error");
2676 }
bdd4ddde 2677 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
6c1153e0 2678 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
bdd4ddde
JG
2679 EDMA_ERR_INTRL_PAR)) {
2680 err_mask |= AC_ERR_ATA_BUS;
cf480626 2681 action |= ATA_EH_RESET;
b64bbc39 2682 ata_ehi_push_desc(ehi, "parity error");
bdd4ddde
JG
2683 }
2684 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2685 ata_ehi_hotplugged(ehi);
2686 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
b64bbc39 2687 "dev disconnect" : "dev connect");
cf480626 2688 action |= ATA_EH_RESET;
bdd4ddde
JG
2689 }
2690
352fab70
ML
2691 /*
2692 * Gen-I has a different SELF_DIS bit,
2693 * different FREEZE bits, and no SERR bit:
2694 */
ee9ccdf7 2695 if (IS_GEN_I(hpriv)) {
bdd4ddde 2696 eh_freeze_mask = EDMA_EH_FREEZE_5;
bdd4ddde 2697 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
bdd4ddde 2698 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 2699 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde
JG
2700 }
2701 } else {
2702 eh_freeze_mask = EDMA_EH_FREEZE;
bdd4ddde 2703 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
bdd4ddde 2704 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 2705 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde 2706 }
bdd4ddde 2707 if (edma_err_cause & EDMA_ERR_SERR) {
8d07379d
ML
2708 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2709 err_mask |= AC_ERR_ATA_BUS;
cf480626 2710 action |= ATA_EH_RESET;
bdd4ddde 2711 }
afb0edd9 2712 }
20f733e7 2713
bdd4ddde
JG
2714 if (!err_mask) {
2715 err_mask = AC_ERR_OTHER;
cf480626 2716 action |= ATA_EH_RESET;
bdd4ddde
JG
2717 }
2718
2719 ehi->serror |= serr;
2720 ehi->action |= action;
2721
2722 if (qc)
2723 qc->err_mask |= err_mask;
2724 else
2725 ehi->err_mask |= err_mask;
2726
37b9046a
ML
2727 if (err_mask == AC_ERR_DEV) {
2728 /*
2729 * Cannot do ata_port_freeze() here,
2730 * because it would kill PIO access,
2731 * which is needed for further diagnosis.
2732 */
2733 mv_eh_freeze(ap);
2734 abort = 1;
2735 } else if (edma_err_cause & eh_freeze_mask) {
2736 /*
2737 * Note to self: ata_port_freeze() calls ata_port_abort()
2738 */
bdd4ddde 2739 ata_port_freeze(ap);
37b9046a
ML
2740 } else {
2741 abort = 1;
2742 }
2743
2744 if (abort) {
2745 if (qc)
2746 ata_link_abort(qc->dev->link);
2747 else
2748 ata_port_abort(ap);
2749 }
bdd4ddde
JG
2750}
2751
1aadf5c3 2752static bool mv_process_crpb_response(struct ata_port *ap,
fcfb1f77
ML
2753 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2754{
752e386c
TH
2755 u8 ata_status;
2756 u16 edma_status = le16_to_cpu(response->flags);
752e386c
TH
2757
2758 /*
2759 * edma_status from a response queue entry:
2760 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2761 * MSB is saved ATA status from command completion.
2762 */
2763 if (!ncq_enabled) {
2764 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2765 if (err_cause) {
2766 /*
2767 * Error will be seen/handled by
2768 * mv_err_intr(). So do nothing at all here.
2769 */
1aadf5c3 2770 return false;
752e386c 2771 }
fcfb1f77 2772 }
752e386c
TH
2773 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2774 if (!ac_err_mask(ata_status))
1aadf5c3 2775 return true;
752e386c 2776 /* else: leave it for mv_err_intr() */
1aadf5c3 2777 return false;
fcfb1f77
ML
2778}
2779
2780static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
bdd4ddde
JG
2781{
2782 void __iomem *port_mmio = mv_ap_base(ap);
2783 struct mv_host_priv *hpriv = ap->host->private_data;
fcfb1f77 2784 u32 in_index;
bdd4ddde 2785 bool work_done = false;
1aadf5c3 2786 u32 done_mask = 0;
fcfb1f77 2787 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
bdd4ddde 2788
fcfb1f77 2789 /* Get the hardware queue position index */
cae5a29d 2790 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
bdd4ddde
JG
2791 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2792
fcfb1f77
ML
2793 /* Process new responses since the last time we looked */
2794 while (in_index != pp->resp_idx) {
6c1153e0 2795 unsigned int tag;
fcfb1f77 2796 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
bdd4ddde 2797
fcfb1f77 2798 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
bdd4ddde 2799
fcfb1f77
ML
2800 if (IS_GEN_I(hpriv)) {
2801 /* 50xx: no NCQ, only one command active at a time */
9af5c9c9 2802 tag = ap->link.active_tag;
fcfb1f77
ML
2803 } else {
2804 /* Gen II/IIE: get command tag from CRPB entry */
2805 tag = le16_to_cpu(response->id) & 0x1f;
bdd4ddde 2806 }
1aadf5c3
TH
2807 if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
2808 done_mask |= 1 << tag;
bdd4ddde 2809 work_done = true;
bdd4ddde
JG
2810 }
2811
1aadf5c3
TH
2812 if (work_done) {
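		/*
		 * ata_qc_complete_multiple() takes the new set of still
		 * active tags, so XOR clears the tags completed above.
		 */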
2813 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
2814
2815 /* Update the software queue position index in hardware */
bdd4ddde 2816 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
fcfb1f77 2817 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
cae5a29d 2818 port_mmio + EDMA_RSP_Q_OUT_PTR);
1aadf5c3 2819 }
20f733e7
BR
2820}
2821
a9010329
ML
2822static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2823{
2824 struct mv_port_priv *pp;
2825 int edma_was_enabled;
2826
a9010329
ML
2827 /*
2828 * Grab a snapshot of the EDMA_EN flag setting,
2829 * so that we have a consistent view for this port,
2830 * even if something we call of our routines changes it.
2831 */
2832 pp = ap->private_data;
2833 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2834 /*
2835 * Process completed CRPB response(s) before other events.
2836 */
2837 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2838 mv_process_crpb_entries(ap, pp);
4c299ca3
ML
2839 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2840 mv_handle_fbs_ncq_dev_err(ap);
a9010329
ML
2841 }
2842 /*
2843 * Handle chip-reported errors, or continue on to handle PIO.
2844 */
2845 if (unlikely(port_cause & ERR_IRQ)) {
2846 mv_err_intr(ap);
2847 } else if (!edma_was_enabled) {
2848 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2849 if (qc)
c3b28894 2850 ata_bmdma_port_intr(ap, qc);
a9010329
ML
2851 else
2852 mv_unexpected_intr(ap, edma_was_enabled);
2853 }
2854}
2855
05b308e1
BR
2856/**
2857 * mv_host_intr - Handle all interrupts on the given host controller
cca3974e 2858 * @host: host specific structure
7368f919 2859 * @main_irq_cause: Main interrupt cause register for the chip.
05b308e1
BR
2860 *
2861 * LOCKING:
2862 * Inherited from caller.
2863 */
7368f919 2864static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
20f733e7 2865{
f351b2d6 2866 struct mv_host_priv *hpriv = host->private_data;
eabd5eb1 2867 void __iomem *mmio = hpriv->base, *hc_mmio;
a3718c1f 2868 unsigned int handled = 0, port;
20f733e7 2869
2b748a0a
ML
2870 /* If asserted, clear the "all ports" IRQ coalescing bit */
2871 if (main_irq_cause & ALL_PORTS_COAL_DONE)
cae5a29d 2872 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2b748a0a 2873
a3718c1f 2874 for (port = 0; port < hpriv->n_ports; port++) {
cca3974e 2875 struct ata_port *ap = host->ports[port];
eabd5eb1
ML
2876 unsigned int p, shift, hardport, port_cause;
2877
a3718c1f 2878 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
a3718c1f 2879 /*
eabd5eb1
ML
2880 * Each hc within the host has its own hc_irq_cause register,
2881 * where the interrupting ports bits get ack'd.
a3718c1f 2882 */
eabd5eb1
ML
2883 if (hardport == 0) { /* first port on this hc ? */
2884 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2885 u32 port_mask, ack_irqs;
2886 /*
2887 * Skip this entire hc if nothing pending for any ports
2888 */
2889 if (!hc_cause) {
2890 port += MV_PORTS_PER_HC - 1;
2891 continue;
2892 }
2893 /*
2894 * We don't need/want to read the hc_irq_cause register,
2895 * because doing so hurts performance, and
2896 * main_irq_cause already gives us everything we need.
2897 *
2898 * But we do have to *write* to the hc_irq_cause to ack
2899 * the ports that we are handling this time through.
2900 *
2901 * This requires that we create a bitmap for those
2902 * ports which interrupted us, and use that bitmap
2903 * to ack (only) those ports via hc_irq_cause.
2904 */
2905 ack_irqs = 0;
2b748a0a
ML
2906 if (hc_cause & PORTS_0_3_COAL_DONE)
2907 ack_irqs = HC_COAL_IRQ;
eabd5eb1
ML
2908 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2909 if ((port + p) >= hpriv->n_ports)
2910 break;
2911 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2912 if (hc_cause & port_mask)
2913 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2914 }
a3718c1f 2915 hc_mmio = mv_hc_base_from_port(mmio, port);
cae5a29d 2916 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
a3718c1f
ML
2917 handled = 1;
2918 }
8f767f8a 2919 /*
a9010329 2920 * Handle interrupts signalled for this port:
8f767f8a 2921 */
a9010329
ML
2922 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2923 if (port_cause)
2924 mv_port_intr(ap, port_cause);
20f733e7 2925 }
a3718c1f 2926 return handled;
20f733e7
BR
2927}
2928
a3718c1f 2929static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
bdd4ddde 2930{
02a121da 2931 struct mv_host_priv *hpriv = host->private_data;
bdd4ddde
JG
2932 struct ata_port *ap;
2933 struct ata_queued_cmd *qc;
2934 struct ata_eh_info *ehi;
2935 unsigned int i, err_mask, printed = 0;
2936 u32 err_cause;
2937
cae5a29d 2938 err_cause = readl(mmio + hpriv->irq_cause_offset);
bdd4ddde 2939
a44fec1f 2940 dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
bdd4ddde
JG
2941
2942 DPRINTK("All regs @ PCI error\n");
2943 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2944
cae5a29d 2945 writelfl(0, mmio + hpriv->irq_cause_offset);
bdd4ddde
JG
2946
2947 for (i = 0; i < host->n_ports; i++) {
2948 ap = host->ports[i];
936fd732 2949 if (!ata_link_offline(&ap->link)) {
9af5c9c9 2950 ehi = &ap->link.eh_info;
bdd4ddde
JG
2951 ata_ehi_clear_desc(ehi);
2952 if (!printed++)
2953 ata_ehi_push_desc(ehi,
2954 "PCI err cause 0x%08x", err_cause);
2955 err_mask = AC_ERR_HOST_BUS;
cf480626 2956 ehi->action = ATA_EH_RESET;
9af5c9c9 2957 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
2958 if (qc)
2959 qc->err_mask |= err_mask;
2960 else
2961 ehi->err_mask |= err_mask;
2962
2963 ata_port_freeze(ap);
2964 }
2965 }
a3718c1f 2966 return 1; /* handled */
bdd4ddde
JG
2967}
2968
05b308e1 2969/**
c5d3e45a 2970 * mv_interrupt - Main interrupt event handler
05b308e1
BR
2971 * @irq: unused
2972 * @dev_instance: private data; in this case the host structure
05b308e1
BR
2973 *
2974 * Read the read-only main interrupt cause register to determine
2975 * whether any host controllers have pending interrupts. If so, call
2976 * the lower-level routine to handle them. Also check for PCI errors,
2977 * which are only reported here.
2978 *
8b260248 2979 * LOCKING:
cca3974e 2980 * This routine holds the host lock while processing pending
05b308e1
BR
2981 * interrupts.
2982 */
7d12e780 2983static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 2984{
cca3974e 2985 struct ata_host *host = dev_instance;
f351b2d6 2986 struct mv_host_priv *hpriv = host->private_data;
a3718c1f 2987 unsigned int handled = 0;
6d3c30ef 2988 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
96e2c487 2989 u32 main_irq_cause, pending_irqs;
20f733e7 2990
646a4da5 2991 spin_lock(&host->lock);
6d3c30ef
ML
2992
2993 /* for MSI: block new interrupts while in here */
2994 if (using_msi)
2b748a0a 2995 mv_write_main_irq_mask(0, hpriv);
6d3c30ef 2996
7368f919 2997 main_irq_cause = readl(hpriv->main_irq_cause_addr);
96e2c487 2998 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
352fab70
ML
2999 /*
3000 * Deal with cases where we either have nothing pending, or have read
3001 * a bogus register value which can indicate HW removal or PCI fault.
20f733e7 3002 */
a44253d2 3003 if (pending_irqs && main_irq_cause != 0xffffffffU) {
1f398472 3004 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
a3718c1f
ML
3005 handled = mv_pci_error(host, hpriv->base);
3006 else
a44253d2 3007 handled = mv_host_intr(host, pending_irqs);
bdd4ddde 3008 }
6d3c30ef
ML
3009
3010 /* for MSI: unmask; interrupt cause bits will retrigger now */
3011 if (using_msi)
2b748a0a 3012 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
6d3c30ef 3013
9d51af7b
ML
3014 spin_unlock(&host->lock);
3015
20f733e7
BR
3016 return IRQ_RETVAL(handled);
3017}
3018
c9d39130
JG
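/*
 * On 50xx chips the SCR registers sit at consecutive 32-bit offsets
 * within the per-port PHY block; only STATUS, ERROR, and CONTROL are
 * mapped, so any other register yields an invalid offset.
 */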
3019static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
3020{
3021 unsigned int ofs;
3022
3023 switch (sc_reg_in) {
3024 case SCR_STATUS:
3025 case SCR_ERROR:
3026 case SCR_CONTROL:
3027 ofs = sc_reg_in * sizeof(u32);
3028 break;
3029 default:
3030 ofs = 0xffffffffU;
3031 break;
3032 }
3033 return ofs;
3034}
3035
82ef04fb 3036static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
c9d39130 3037{
82ef04fb 3038 struct mv_host_priv *hpriv = link->ap->host->private_data;
f351b2d6 3039 void __iomem *mmio = hpriv->base;
82ef04fb 3040 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
c9d39130
JG
3041 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3042
da3dbb17
TH
3043 if (ofs != 0xffffffffU) {
3044 *val = readl(addr + ofs);
3045 return 0;
3046 } else
3047 return -EINVAL;
c9d39130
JG
3048}
3049
82ef04fb 3050static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
c9d39130 3051{
82ef04fb 3052 struct mv_host_priv *hpriv = link->ap->host->private_data;
f351b2d6 3053 void __iomem *mmio = hpriv->base;
82ef04fb 3054 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
c9d39130
JG
3055 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3056
da3dbb17 3057 if (ofs != 0xffffffffU) {
0d5ff566 3058 writelfl(val, addr + ofs);
da3dbb17
TH
3059 return 0;
3060 } else
3061 return -EINVAL;
c9d39130
JG
3062}
3063
7bb3c529 3064static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
522479fb 3065{
7bb3c529 3066 struct pci_dev *pdev = to_pci_dev(host->dev);
522479fb
JG
3067 int early_5080;
3068
44c10138 3069 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
522479fb
JG
3070
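	/* Skipped on the earliest (rev 0) 5080 parts, presumably per erratum */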
3071 if (!early_5080) {
3072 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3073 tmp |= (1 << 0);
3074 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3075 }
3076
7bb3c529 3077 mv_reset_pci_bus(host, mmio);
522479fb
JG
3078}
3079
3080static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3081{
cae5a29d 3082 writel(0x0fcfffff, mmio + FLASH_CTL);
522479fb
JG
3083}
3084
47c2b677 3085static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
ba3fe8fb
JG
3086 void __iomem *mmio)
3087{
c9d39130
JG
3088 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3089 u32 tmp;
3090
3091 tmp = readl(phy_mmio + MV5_PHY_MODE);
3092
3093 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
3094 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
ba3fe8fb
JG
3095}
3096
47c2b677 3097static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 3098{
522479fb
JG
3099 u32 tmp;
3100
cae5a29d 3101 writel(0, mmio + GPIO_PORT_CTL);
522479fb
JG
3102
3103 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3104
3105 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3106 tmp |= ~(1 << 0);
3107 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
ba3fe8fb
JG
3108}
3109
2a47ce06
JG
3110static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3111 unsigned int port)
bca1c4eb 3112{
c9d39130
JG
3113 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3114 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3115 u32 tmp;
3116 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3117
3118 if (fix_apm_sq) {
cae5a29d 3119 tmp = readl(phy_mmio + MV5_LTMODE);
c9d39130 3120 tmp |= (1 << 19);
cae5a29d 3121 writel(tmp, phy_mmio + MV5_LTMODE);
c9d39130 3122
cae5a29d 3123 tmp = readl(phy_mmio + MV5_PHY_CTL);
c9d39130
JG
3124 tmp &= ~0x3;
3125 tmp |= 0x1;
cae5a29d 3126 writel(tmp, phy_mmio + MV5_PHY_CTL);
c9d39130
JG
3127 }
3128
3129 tmp = readl(phy_mmio + MV5_PHY_MODE);
3130 tmp &= ~mask;
3131 tmp |= hpriv->signal[port].pre;
3132 tmp |= hpriv->signal[port].amps;
3133 writel(tmp, phy_mmio + MV5_PHY_MODE);
bca1c4eb
JG
3134}
3135
c9d39130
JG
3136
3137#undef ZERO
3138#define ZERO(reg) writel(0, port_mmio + (reg))
3139static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3140 unsigned int port)
3141{
3142 void __iomem *port_mmio = mv_port_base(mmio, port);
3143
e12bef50 3144 mv_reset_channel(hpriv, mmio, port);
c9d39130
JG
3145
3146 ZERO(0x028); /* command */
cae5a29d 3147 writel(0x11f, port_mmio + EDMA_CFG);
c9d39130
JG
3148 ZERO(0x004); /* timer */
3149 ZERO(0x008); /* irq err cause */
3150 ZERO(0x00c); /* irq err mask */
3151 ZERO(0x010); /* rq bah */
3152 ZERO(0x014); /* rq inp */
3153 ZERO(0x018); /* rq outp */
3154 ZERO(0x01c); /* respq bah */
3155 ZERO(0x024); /* respq outp */
3156 ZERO(0x020); /* respq inp */
3157 ZERO(0x02c); /* test control */
cae5a29d 3158 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
c9d39130
JG
3159}
3160#undef ZERO
3161
3162#define ZERO(reg) writel(0, hc_mmio + (reg))
3163static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3164 unsigned int hc)
47c2b677 3165{
c9d39130
JG
3166 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3167 u32 tmp;
3168
3169 ZERO(0x00c);
3170 ZERO(0x010);
3171 ZERO(0x014);
3172 ZERO(0x018);
3173
3174 tmp = readl(hc_mmio + 0x20);
3175 tmp &= 0x1c1c1c1c;
3176 tmp |= 0x03030303;
3177 writel(tmp, hc_mmio + 0x20);
3178}
3179#undef ZERO
3180
3181static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3182 unsigned int n_hc)
3183{
3184 unsigned int hc, port;
3185
3186 for (hc = 0; hc < n_hc; hc++) {
3187 for (port = 0; port < MV_PORTS_PER_HC; port++)
3188 mv5_reset_hc_port(hpriv, mmio,
3189 (hc * MV_PORTS_PER_HC) + port);
3190
3191 mv5_reset_one_hc(hpriv, mmio, hc);
3192 }
3193
3194 return 0;
47c2b677
JG
3195}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_offset);
	ZERO(hpriv->irq_mask_offset);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + GPIO_PORT_CTL);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, m3;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/*
	 * Gen-II/IIe PHY_MODE3 errata RM#2:
	 * Achieves better receiver noise performance than the h/w default:
	 */
	m3 = readl(port_mmio + PHY_MODE3);
	m3 = (m3 & 0x1f) | (0x5555601 << 5);

	/* Guideline 88F5182 (GL# SATA-S11) */
	if (IS_SOC(hpriv))
		m3 &= ~0x1c;

	if (fix_phy_mode4) {
		u32 m4 = readl(port_mmio + PHY_MODE4);
		/*
		 * Enforce reserved-bit restrictions on GenIIe devices only.
		 * For earlier chipsets, force only the internal config field
		 * (workaround for errata FEr SATA#10 part 1).
		 */
		if (IS_GEN_IIE(hpriv))
			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		else
			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
		writel(m4, port_mmio + PHY_MODE4);
	}
	/*
	 * Workaround for 60x1-B2 errata SATA#13:
	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
	 * Or ensure we use writelfl() when writing PHY_MODE4.
	 */
	writel(m3, port_mmio + PHY_MODE3);

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);
	u32 reg;

	reg = readl(port_mmio + PHY_MODE3);
	reg &= ~(0x3 << 27);	/* SELMUPF (bits 28:27) to 1 */
	reg |= (0x1 << 27);
	reg &= ~(0x3 << 29);	/* SELMUPI (bits 30:29) to 1 */
	reg |= (0x1 << 29);
	writel(reg, port_mmio + PHY_MODE3);

	reg = readl(port_mmio + PHY_MODE4);
	reg &= ~0x1;	/* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
	reg |= (0x1 << 16);
	writel(reg, port_mmio + PHY_MODE4);

	reg = readl(port_mmio + PHY_MODE9_GEN2);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN2);

	reg = readl(port_mmio + PHY_MODE9_GEN1);
	reg &= ~0xf;	/* TXAMP[3:0] (bits 3:0) to 8 */
	reg |= 0x8;
	reg &= ~(0x1 << 14);	/* TXAMP[4] (bit 14) to 0 */
	writel(reg, port_mmio + PHY_MODE9_GEN1);
}

/**
 * soc_is_65n - check whether the SoC is a 65 nm device
 *
 * Detect the SoC type by reading the PHYCFG_OFS register: it exists only
 * on the 65 nm devices, where it holds a non-zero value; reading it on
 * older devices returns 0.
 */
static bool soc_is_65n(struct mv_host_priv *hpriv)
{
	void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);

	if (readl(port0_mmio + PHYCFG_OFS))
		return true;
	return false;
}
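/*
 * Editor's note on mv_setup_ifcfg() below: it rewrites SATA_IFCFG with
 * constants taken from the chip spec; bit 7 requests Gen2i (3.0 Gb/s)
 * signalling, and mv_hardreset() clears it again when its errata
 * fallback forces the link down to 1.5 Gb/s.
 */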

static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_IFCFG);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_IFCFG);
}

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
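/*
 * Editor's note: the low nibble of SATA_IFCTL selects which
 * port-multiplier device port the interface talks to; mv_pmp_select()
 * below only rewrites it when the target PMP actually changes, avoiding
 * a needless posted-write flush.
 */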

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}
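/*
 * Editor's note on the retry loop in mv_hardreset() below, per the
 * standard SStatus encoding (DET in bits 3:0, SPD in 7:4, IPM in 11:8):
 * the loop exits only on 0x0 (no device), 0x113 (Gen1 link up) or 0x123
 * (Gen2 link up).  A persistent 0x121 (presence detected but no phy
 * communication) is the errata signature that triggers the fallback to
 * a forced 1.5 Gb/s link.
 */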

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		rc = online ? -EAGAIN : rc;
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);

	return rc;
}
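/*
 * Editor's note: in the libata error-handling pair below, mv_eh_freeze()
 * quiesces the port by stopping EDMA and masking its interrupts, while
 * mv_eh_thaw() clears any latched EDMA and host-controller causes before
 * unmasking ERR_IRQ again, so stale events cannot re-trigger the handler.
 */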

static void mv_eh_freeze(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int port = ap->port_no;
	unsigned int hardport = mv_hardport_from_port(port);
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 hc_irq_cause;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	mv_enable_port_irqs(ap, ERR_IRQ);
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *serr, *shd_base = port_mmio + SHD_BLK;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;

	/* Clear any currently outstanding port interrupt conditions */
	serr = port_mmio + mv_scr_offset(SCR_ERROR);
	writelfl(readl(serr), serr);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
		readl(port_mmio + EDMA_ERR_IRQ_MASK));
}
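/*
 * Editor's note on the bus-mode helpers below: mv_pci_cut_through_okay()
 * vetoes cut-through only when the chip is outside PCI-X mode with the
 * MRDTRIG bit set, while the 60x1-B2 PCI#7 workaround applies only when
 * the chip does run in PCI-X mode.
 */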

static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable */
	reg = readl(mmio + MV_PCI_MODE);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}

static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + MV_PCI_COMMAND);
		if (reg & MV_PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}

static void mv_60x1b2_errata_pci7(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	/* workaround for 60x1-B2 errata PCI#7 */
	if (mv_in_pcix_mode(host)) {
		u32 reg = readl(mmio + MV_PCI_COMMAND);
		writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
	}
}
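/*
 * Editor's note: mv_chip_id() below selects the per-family ops vector and
 * records which silicon-revision errata workarounds apply; the resulting
 * hp_flags also decide whether the PCI or PCIe interrupt register offsets
 * are used.
 */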

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			mv_60x1b2_errata_pci7(host);
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x2: /* Rev.B0: the first/only public release */
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_warn(&pdev->dev,
				 "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	case chip_soc:
		if (soc_is_65n(hpriv))
			hpriv->ops = &mv_soc_65n_ops;
		else
			hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
			MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_offset	= PCIE_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCIE_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_offset	= PCI_IRQ_CAUSE;
		hpriv->irq_mask_offset	= PCI_IRQ_MASK;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, hpriv->board_idx);
	if (rc)
		goto done;

	if (IS_SOC(hpriv)) {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK;
	} else {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK;
	}

	/* initialize shadow irq mask with register's value */
	hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);

	/* global interrupt mask: 0 == mask everything */
	mv_set_main_irq_mask(host, ~0, 0);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		if (hpriv->ops->read_preamp)
			hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG),
			readl(hc_mmio + HC_IRQ_CAUSE));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE);
	}

	if (!IS_SOC(hpriv)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_offset);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
	}

	/*
	 * enable only global host interrupts for now.
	 * The per-port interrupts get done later as ports are set up.
	 */
	mv_set_main_irq_mask(host, 0, PCI_ERR);
	mv_set_irq_coalescing(host, irq_coalescing_io_count,
				    irq_coalescing_usecs);
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
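/*
 * Editor's note, derived from the write below: each MBUS window control
 * register packs the window size (bits 31:16, from (size - 1)), the DRAM
 * chip-select attribute (bits 15:8), the DRAM target id (bits 7:4) and
 * an enable flag (bit 0).
 */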

static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 const struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

/**
 * mv_platform_probe - handle a positive probe of an soc Marvell
 * host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	const struct mv_sata_platform_data *mv_platform_data;
	const struct mbus_dram_target_info *dram;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports = 0, irq = 0;
	int rc;
	int port;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	if (pdev->dev.of_node) {
		of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
	} else {
		mv_platform_data = dev_get_platdata(&pdev->dev);
		n_ports = mv_platform_data->n_ports;
		irq = platform_get_irq(pdev, 0);
	}

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	hpriv->port_clks = devm_kzalloc(&pdev->dev,
					sizeof(struct clk *) * n_ports,
					GFP_KERNEL);
	if (!hpriv->port_clks)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	hpriv->board_idx = chip_soc;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   resource_size(res));
	hpriv->base -= SATAHC0_REG_BASE;

	hpriv->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(hpriv->clk))
		dev_notice(&pdev->dev, "cannot get optional clkdev\n");
	else
		clk_prepare_enable(hpriv->clk);

	for (port = 0; port < n_ports; port++) {
		char port_number[16];
		sprintf(port_number, "%d", port);
		hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
		if (!IS_ERR(hpriv->port_clks[port]))
			clk_prepare_enable(hpriv->port_clks[port]);
	}

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	dram = mv_mbus_dram_info();
	if (dram)
		mv_conf_mbus_windows(hpriv, dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		goto err;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		goto err;

	dev_info(&pdev->dev, "slots %u ports %d\n",
		 (unsigned)MV_MAX_Q_DEPTH, host->n_ports);

	rc = ata_host_activate(host, irq, mv_interrupt, IRQF_SHARED, &mv6_sht);
	if (!rc)
		return 0;

err:
	if (!IS_ERR(hpriv->clk)) {
		clk_disable_unprepare(hpriv->clk);
		clk_put(hpriv->clk);
	}
	for (port = 0; port < n_ports; port++) {
		if (!IS_ERR(hpriv->port_clks[port])) {
			clk_disable_unprepare(hpriv->port_clks[port]);
			clk_put(hpriv->port_clks[port]);
		}
	}

	return rc;
}

/*
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged. Perform the needed
 * cleanup. Also called on module unload for any active devices.
 */
static int mv_platform_remove(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	struct mv_host_priv *hpriv = host->private_data;
	int port;

	ata_host_detach(host);

	if (!IS_ERR(hpriv->clk)) {
		clk_disable_unprepare(hpriv->clk);
		clk_put(hpriv->clk);
	}
	for (port = 0; port < host->n_ports; port++) {
		if (!IS_ERR(hpriv->port_clks[port])) {
			clk_disable_unprepare(hpriv->port_clks[port]);
			clk_put(hpriv->port_clks[port]);
		}
	}
	return 0;
}

#ifdef CONFIG_PM
static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct ata_host *host = platform_get_drvdata(pdev);

	if (host)
		return ata_host_suspend(host, state);
	else
		return 0;
}

static int mv_platform_resume(struct platform_device *pdev)
{
	struct ata_host *host = platform_get_drvdata(pdev);
	const struct mbus_dram_target_info *dram;
	int ret;

	if (host) {
		struct mv_host_priv *hpriv = host->private_data;

		/*
		 * (Re-)program MBUS remapping windows if we are asked to.
		 */
		dram = mv_mbus_dram_info();
		if (dram)
			mv_conf_mbus_windows(hpriv, dram);

		/* initialize adapter */
		ret = mv_init_host(host);
		if (ret) {
			printk(KERN_ERR DRV_NAME ": Error during HW init\n");
			return ret;
		}
		ata_host_resume(host);
	}

	return 0;
}
#else
#define mv_platform_suspend NULL
#define mv_platform_resume NULL
#endif

#ifdef CONFIG_OF
static struct of_device_id mv_sata_dt_ids[] = {
	{ .compatible = "marvell,orion-sata", },
	{},
};
MODULE_DEVICE_TABLE(of, mv_sata_dt_ids);
#endif

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= mv_platform_remove,
	.suspend	= mv_platform_suspend,
	.resume		= mv_platform_resume,
	.driver		= {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(mv_sata_dt_ids),
	},
};

#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ata_pci_device_suspend,
	.resume			= mv_pci_device_resume,
#endif
};
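/*
 * Editor's note: pci_go_64() below prefers a 64-bit DMA mask and falls
 * back to 32-bit masks (first for consistent allocations, then for
 * streaming DMA) when the platform cannot satisfy the wider one.
 */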

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
			if (rc) {
				dev_err(&pdev->dev,
					"64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev,
				"32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_info(&pdev->dev, "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, port, rc;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;
	hpriv->board_idx = board_idx;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(hpriv->base, port);
		unsigned int offset = port_mmio - hpriv->base;

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	/* Enable message-switched interrupts, if requested */
	if (msi && pci_enable_msi(pdev) == 0)
		hpriv->hp_flags |= MV_HP_FLAG_MSI;

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}

#ifdef CONFIG_PM
static int mv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host);
	if (rc)
		return rc;

	ata_host_resume(host);

	return 0;
}
#endif
#endif
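/*
 * Editor's note: the module registers both a PCI driver and a platform
 * driver, so one build serves plug-in boards and SoC-integrated
 * controllers alike; if the platform registration fails, the PCI
 * registration is rolled back before the error is returned.
 */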

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

module_init(mv_init);
module_exit(mv_exit);