/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FIS_CFG_OFS		= 0x360,
	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX |
				 /* temporary, until we fix hotplug: */
				 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
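/*
 * Worked example: with MV_MAX_SG_CT == 256 the per-tag ePRD table holds
 * 256 entries, and mv_fill_sg() can emit at most two ePRDs per
 * scatterlist entry (one split per 64K boundary crossing), so limiting
 * .sg_tablesize to MV_MAX_SG_CT / 2 == 128 guarantees that even the
 * worst case still fits in the table.
 */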
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= sata_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
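/*
 * Example of the mapping above: host-wide port 6 lives on host
 * controller 6 >> 2 == 1, as hard port 6 & 3 == 2 within that HC.
 */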

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
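	/*
	 * Note: the double 16-bit shift below (rather than a single
	 * ">> 32") keeps the expression well-defined when dma_addr_t
	 * is only 32 bits wide, in which case the high half is simply 0.
	 */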
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *      mv_start_dma - Enable eDMA engine
 *      @ap: ATA channel to manipulate
 *      @port_mmio: port base address
 *      @pp: port private data
 *      @protocol: taskfile protocol of the command about to be issued
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 *      mv_stop_edma_engine - Disable eDMA engine
 *      @port_mmio: io base address
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}
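/*
 * The polling loop above allows up to 10000 * 10us == 100ms for the
 * engine to acknowledge the disable before giving up with -EIO.
 */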

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4-port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
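/*
 * For instance, SCR_STATUS (0) maps to offset 0x300 and SCR_ERROR (1)
 * to 0x304, matching the "ctrl, err regs follow status" layout noted
 * in the SATA register definitions above.
 */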

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 *
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap))
			adev->flags &= ~ATA_DFLAG_NCQ;
		else if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
	}
}

static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
{
	u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
	/*
	 * Various bit settings required for operation
	 * in FIS-based switching (fbs) mode on GenIIe:
	 */
	old_fcfg   = readl(port_mmio + FIS_CFG_OFS);
	old_ltmode = readl(port_mmio + LTMODE_OFS);
	if (enable_fbs) {
		new_fcfg   = old_fcfg   |  FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode |  LTMODE_BIT8;
	} else { /* disable fbs */
		new_fcfg   = old_fcfg   & ~FIS_CFG_SINGLE_SYNC;
		new_ltmode = old_ltmode & ~LTMODE_BIT8;
	}
	if (new_fcfg != old_fcfg)
		writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
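/*
 * Worked example of the splitting above: a segment at bus address
 * 0xfff0 with length 0x2000 crosses the 64K boundary at 0x10000, so it
 * is emitted as two ePRDs: one of length 0x10 at 0xfff0, then one of
 * length 0x1ff0 at 0x10000.
 */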

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
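/*
 * Layout of the resulting little-endian word: bits 7:0 carry the
 * register data, the register address goes in at CRQB_CMD_ADDR_SHIFT,
 * CRQB_CMD_CS supplies the fixed (presumably chip-select) bits, and
 * bit 15 (CRQB_CMD_LAST) marks the final word of the CRQB's ATA command.
 */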

/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
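/*
 * Note that pp->req_idx is a free-running producer counter; masking
 * with MV_MAX_Q_DEPTH_MASK converts it to a ring slot (e.g. a count of
 * 33 selects slot 1), so it never needs an explicit wrap check.
 */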
1459
05b308e1
BR
1460/**
1461 * mv_err_intr - Handle error interrupts on the port
1462 * @ap: ATA channel to manipulate
9b358e30 1463 * @reset_allowed: bool: 0 == don't trigger from reset here
05b308e1
BR
1464 *
1465 * In most cases, just clear the interrupt and move on. However,
e12bef50
ML
1466 * some cases require an eDMA reset, which also performs a COMRESET.
1467 * The SERR case requires a clear of pending errors in the SATA
1468 * SERROR register. Finally, if the port disabled DMA,
1469 * update our cached copy to match.
05b308e1
BR
1470 *
1471 * LOCKING:
1472 * Inherited from caller.
1473 */
bdd4ddde 1474static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
31961943
BR
1475{
1476 void __iomem *port_mmio = mv_ap_base(ap);
bdd4ddde
JG
1477 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1478 struct mv_port_priv *pp = ap->private_data;
1479 struct mv_host_priv *hpriv = ap->host->private_data;
1480 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1481 unsigned int action = 0, err_mask = 0;
9af5c9c9 1482 struct ata_eh_info *ehi = &ap->link.eh_info;
20f733e7 1483
bdd4ddde 1484 ata_ehi_clear_desc(ehi);
20f733e7 1485
bdd4ddde
JG
1486 if (!edma_enabled) {
1487 /* just a guess: do we need to do this? should we
1488 * expand this, and do it in all cases?
1489 */
936fd732
TH
1490 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1491 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
20f733e7 1492 }
bdd4ddde
JG
1493
1494 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1495
1496 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1497
1498 /*
1499 * all generations share these EDMA error cause bits
1500 */
1501
1502 if (edma_err_cause & EDMA_ERR_DEV)
1503 err_mask |= AC_ERR_DEV;
1504 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
6c1153e0 1505 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
bdd4ddde
JG
1506 EDMA_ERR_INTRL_PAR)) {
1507 err_mask |= AC_ERR_ATA_BUS;
cf480626 1508 action |= ATA_EH_RESET;
b64bbc39 1509 ata_ehi_push_desc(ehi, "parity error");
bdd4ddde
JG
1510 }
1511 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1512 ata_ehi_hotplugged(ehi);
1513 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
b64bbc39 1514 "dev disconnect" : "dev connect");
cf480626 1515 action |= ATA_EH_RESET;
bdd4ddde
JG
1516 }
1517
ee9ccdf7 1518 if (IS_GEN_I(hpriv)) {
bdd4ddde
JG
1519 eh_freeze_mask = EDMA_EH_FREEZE_5;
1520
1521 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
5ab063e3 1522 pp = ap->private_data;
bdd4ddde 1523 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 1524 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde
JG
1525 }
1526 } else {
1527 eh_freeze_mask = EDMA_EH_FREEZE;
1528
1529 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
5ab063e3 1530 pp = ap->private_data;
bdd4ddde 1531 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
b64bbc39 1532 ata_ehi_push_desc(ehi, "EDMA self-disable");
bdd4ddde
JG
1533 }
1534
1535 if (edma_err_cause & EDMA_ERR_SERR) {
936fd732
TH
1536 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1537 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
bdd4ddde 1538 err_mask = AC_ERR_ATA_BUS;
cf480626 1539 action |= ATA_EH_RESET;
bdd4ddde 1540 }
afb0edd9 1541 }
20f733e7
BR
1542
1543 /* Clear EDMA now that SERR cleanup done */
3606a380 1544 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
20f733e7 1545
bdd4ddde
JG
1546 if (!err_mask) {
1547 err_mask = AC_ERR_OTHER;
cf480626 1548 action |= ATA_EH_RESET;
bdd4ddde
JG
1549 }
1550
1551 ehi->serror |= serr;
1552 ehi->action |= action;
1553
1554 if (qc)
1555 qc->err_mask |= err_mask;
1556 else
1557 ehi->err_mask |= err_mask;
1558
1559 if (edma_err_cause & eh_freeze_mask)
1560 ata_port_freeze(ap);
1561 else
1562 ata_port_abort(ap);
1563}
1564
1565static void mv_intr_pio(struct ata_port *ap)
1566{
1567 struct ata_queued_cmd *qc;
1568 u8 ata_status;
1569
1570 /* ignore spurious intr if drive still BUSY */
1571 ata_status = readb(ap->ioaddr.status_addr);
1572 if (unlikely(ata_status & ATA_BUSY))
1573 return;
1574
1575 /* get active ATA command */
9af5c9c9 1576 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1577 if (unlikely(!qc)) /* no active tag */
1578 return;
1579 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1580 return;
1581
1582 /* and finally, complete the ATA command */
1583 qc->err_mask |= ac_err_mask(ata_status);
1584 ata_qc_complete(qc);
1585}
1586
1587static void mv_intr_edma(struct ata_port *ap)
1588{
1589 void __iomem *port_mmio = mv_ap_base(ap);
1590 struct mv_host_priv *hpriv = ap->host->private_data;
1591 struct mv_port_priv *pp = ap->private_data;
1592 struct ata_queued_cmd *qc;
1593 u32 out_index, in_index;
1594 bool work_done = false;
1595
1596 /* get h/w response queue pointer */
1597 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1598 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1599
1600 while (1) {
1601 u16 status;
6c1153e0 1602 unsigned int tag;
1603
1604 /* get s/w response queue last-read pointer, and compare */
1605 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1606 if (in_index == out_index)
1607 break;
1608
bdd4ddde 1609 /* 50xx: get active ATA command */
0ea9e179 1610 if (IS_GEN_I(hpriv))
9af5c9c9 1611 tag = ap->link.active_tag;
bdd4ddde 1612
1613 /* Gen II/IIE: get active ATA command via tag, to enable
1614 * support for queueing. this works transparently for
1615 * queued and non-queued modes.
bdd4ddde 1616 */
1617 else
1618 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
bdd4ddde 1619
6c1153e0 1620 qc = ata_qc_from_tag(ap, tag);
bdd4ddde 1621
1622 /* For non-NCQ mode, the lower 8 bits of status
1623 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1624 * which should be zero if all went well.
1625 */
1626 status = le16_to_cpu(pp->crpb[out_index].flags);
cb924419 1627 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1628 mv_err_intr(ap, qc);
1629 return;
1630 }
1631
1632 /* and finally, complete the ATA command */
1633 if (qc) {
1634 qc->err_mask |=
1635 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1636 ata_qc_complete(qc);
1637 }
1638
0ea9e179 1639 /* advance software response queue pointer, to
1640 * indicate (after the loop completes) to hardware
1641 * that we have consumed a response queue entry.
1642 */
1643 work_done = true;
1644 pp->resp_idx++;
1645 }
1646
1647 if (work_done)
1648 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1649 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1650 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1651}
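/*
 * A minimal sketch (these helpers are illustrative, not part of this
 * driver) of how the CRPB 'flags' word read above splits into the two
 * fields mv_intr_edma() consumes: the low byte mirrors
 * EDMA_ERR_IRQ_CAUSE_OFS and the high byte carries the ATA status.
 */
static inline u8 crpb_edma_err_bits(u16 flags)
{
	/* non-zero here (in non-NCQ mode) routes to mv_err_intr() */
	return flags & 0xff;
}

static inline u8 crpb_ata_status(u16 flags)
{
	/* fed to ac_err_mask() when completing the qc */
	return flags >> CRPB_FLAG_STATUS_SHIFT;
}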
1652
1653/**
1654 * mv_host_intr - Handle all interrupts on the given host controller
cca3974e 1655 * @host: host specific structure
1656 * @relevant: port error bits relevant to this host controller
1657 * @hc: which host controller we're to look at
1658 *
 1659 * Read then write-clear the HC interrupt status, then walk each
 1660 * port connected to the HC and see if it needs servicing. Port
1661 * success ints are reported in the HC interrupt status reg, the
1662 * port error ints are reported in the higher level main
1663 * interrupt status register and thus are passed in via the
1664 * 'relevant' argument.
1665 *
1666 * LOCKING:
1667 * Inherited from caller.
1668 */
cca3974e 1669static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
20f733e7 1670{
1671 struct mv_host_priv *hpriv = host->private_data;
1672 void __iomem *mmio = hpriv->base;
20f733e7 1673 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
20f733e7 1674 u32 hc_irq_cause;
f351b2d6 1675 int port, port0, last_port;
20f733e7 1676
35177265 1677 if (hc == 0)
20f733e7 1678 port0 = 0;
35177265 1679 else
20f733e7 1680 port0 = MV_PORTS_PER_HC;
20f733e7 1681
1682 if (HAS_PCI(host))
1683 last_port = port0 + MV_PORTS_PER_HC;
1684 else
1685 last_port = port0 + hpriv->n_ports;
1686 /* we'll need the HC success int register in most cases */
1687 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1688 if (!hc_irq_cause)
1689 return;
1690
1691 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1692
1693 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
2dcb407e 1694 hc, relevant, hc_irq_cause);
20f733e7 1695
8f71efe2 1696 for (port = port0; port < last_port; port++) {
cca3974e 1697 struct ata_port *ap = host->ports[port];
8f71efe2 1698 struct mv_port_priv *pp;
bdd4ddde 1699 int have_err_bits, hard_port, shift;
55d8ca4f 1700
bdd4ddde 1701 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1702 continue;
1703
1704 pp = ap->private_data;
1705
31961943 1706 shift = port << 1; /* (port * 2) */
e12bef50 1707 if (port >= MV_PORTS_PER_HC)
20f733e7 1708 shift++; /* skip bit 8 in the HC Main IRQ reg */
e12bef50 1709
1710 have_err_bits = ((PORT0_ERR << shift) & relevant);
1711
1712 if (unlikely(have_err_bits)) {
1713 struct ata_queued_cmd *qc;
8b260248 1714
9af5c9c9 1715 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1716 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1717 continue;
1718
1719 mv_err_intr(ap, qc);
1720 continue;
1721 }
1722
1723 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1724
1725 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1726 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1727 mv_intr_edma(ap);
1728 } else {
1729 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1730 mv_intr_pio(ap);
1731 }
1732 }
1733 VPRINTK("EXIT\n");
1734}
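/*
 * Worked example of the bit arithmetic in mv_host_intr(): each port
 * owns an err/done bit pair in the main cause register and bit 8 is
 * skipped, so port 5 yields shift = (5 << 1) + 1 = 11 and its error
 * bits are tested as (PORT0_ERR << 11) against 'relevant'.
 */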
1735
1736static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1737{
02a121da 1738 struct mv_host_priv *hpriv = host->private_data;
1739 struct ata_port *ap;
1740 struct ata_queued_cmd *qc;
1741 struct ata_eh_info *ehi;
1742 unsigned int i, err_mask, printed = 0;
1743 u32 err_cause;
1744
02a121da 1745 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1746
1747 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1748 err_cause);
1749
1750 DPRINTK("All regs @ PCI error\n");
1751 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1752
02a121da 1753 writelfl(0, mmio + hpriv->irq_cause_ofs);
1754
1755 for (i = 0; i < host->n_ports; i++) {
1756 ap = host->ports[i];
936fd732 1757 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1758 ehi = &ap->link.eh_info;
1759 ata_ehi_clear_desc(ehi);
1760 if (!printed++)
1761 ata_ehi_push_desc(ehi,
1762 "PCI err cause 0x%08x", err_cause);
1763 err_mask = AC_ERR_HOST_BUS;
cf480626 1764 ehi->action = ATA_EH_RESET;
9af5c9c9 1765 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1766 if (qc)
1767 qc->err_mask |= err_mask;
1768 else
1769 ehi->err_mask |= err_mask;
1770
1771 ata_port_freeze(ap);
1772 }
1773 }
1774}
1775
05b308e1 1776/**
c5d3e45a 1777 * mv_interrupt - Main interrupt event handler
1778 * @irq: unused
1779 * @dev_instance: private data; in this case the host structure
1780 *
 1781 * Read the read-only register to determine if any host
 1782 * controllers have pending interrupts. If so, call the lower-level
1783 * routine to handle. Also check for PCI errors which are only
1784 * reported here.
1785 *
8b260248 1786 * LOCKING:
cca3974e 1787 * This routine holds the host lock while processing pending
1788 * interrupts.
1789 */
7d12e780 1790static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1791{
cca3974e 1792 struct ata_host *host = dev_instance;
f351b2d6 1793 struct mv_host_priv *hpriv = host->private_data;
20f733e7 1794 unsigned int hc, handled = 0, n_hcs;
f351b2d6 1795 void __iomem *mmio = hpriv->base;
646a4da5 1796 u32 irq_stat, irq_mask;
20f733e7 1797
e12bef50 1798 /* Note to self: &host->lock == &ap->host->lock == ap->lock */
646a4da5 1799 spin_lock(&host->lock);
1800
1801 irq_stat = readl(hpriv->main_cause_reg_addr);
1802 irq_mask = readl(hpriv->main_mask_reg_addr);
1803
1804 /* check the cases where we either have nothing pending or have read
1805 * a bogus register value which can indicate HW removal or PCI fault
1806 */
1807 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1808 goto out_unlock;
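	/* (A PCI master abort completes reads as all-ones, which is why
	 * 0xffffffff here is taken to mean the hardware is gone.) */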
20f733e7 1809
cca3974e 1810 n_hcs = mv_get_hc_count(host->ports[0]->flags);
20f733e7 1811
7bb3c529 1812 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1813 mv_pci_error(host, mmio);
1814 handled = 1;
1815 goto out_unlock; /* skip all other HC irq handling */
1816 }
1817
1818 for (hc = 0; hc < n_hcs; hc++) {
1819 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1820 if (relevant) {
cca3974e 1821 mv_host_intr(host, relevant, hc);
bdd4ddde 1822 handled = 1;
1823 }
1824 }
615ab953 1825
bdd4ddde 1826out_unlock:
cca3974e 1827 spin_unlock(&host->lock);
1828
1829 return IRQ_RETVAL(handled);
1830}
1831
1832static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1833{
1834 unsigned int ofs;
1835
1836 switch (sc_reg_in) {
1837 case SCR_STATUS:
1838 case SCR_ERROR:
1839 case SCR_CONTROL:
1840 ofs = sc_reg_in * sizeof(u32);
1841 break;
1842 default:
1843 ofs = 0xffffffffU;
1844 break;
1845 }
1846 return ofs;
1847}
1848
da3dbb17 1849static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1850{
1851 struct mv_host_priv *hpriv = ap->host->private_data;
1852 void __iomem *mmio = hpriv->base;
0d5ff566 1853 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1854 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1855
1856 if (ofs != 0xffffffffU) {
1857 *val = readl(addr + ofs);
1858 return 0;
1859 } else
1860 return -EINVAL;
1861}
1862
da3dbb17 1863static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1864{
1865 struct mv_host_priv *hpriv = ap->host->private_data;
1866 void __iomem *mmio = hpriv->base;
0d5ff566 1867 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1868 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1869
da3dbb17 1870 if (ofs != 0xffffffffU) {
0d5ff566 1871 writelfl(val, addr + ofs);
1872 return 0;
1873 } else
1874 return -EINVAL;
1875}
1876
7bb3c529 1877static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
522479fb 1878{
7bb3c529 1879 struct pci_dev *pdev = to_pci_dev(host->dev);
1880 int early_5080;
1881
44c10138 1882 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1883
1884 if (!early_5080) {
1885 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1886 tmp |= (1 << 0);
1887 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1888 }
1889
7bb3c529 1890 mv_reset_pci_bus(host, mmio);
1891}
1892
1893static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1894{
1895 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1896}
1897
47c2b677 1898static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1899 void __iomem *mmio)
1900{
1901 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1902 u32 tmp;
1903
1904 tmp = readl(phy_mmio + MV5_PHY_MODE);
1905
1906 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1907 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1908}
1909
47c2b677 1910static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1911{
1912 u32 tmp;
1913
1914 writel(0, mmio + MV_GPIO_PORT_CTL);
1915
1916 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1917
1918 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1919 tmp |= ~(1 << 0);
1920 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1921}
1922
1923static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1924 unsigned int port)
bca1c4eb 1925{
1926 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1927 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1928 u32 tmp;
1929 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1930
1931 if (fix_apm_sq) {
1932 tmp = readl(phy_mmio + MV5_LT_MODE);
1933 tmp |= (1 << 19);
1934 writel(tmp, phy_mmio + MV5_LT_MODE);
1935
1936 tmp = readl(phy_mmio + MV5_PHY_CTL);
1937 tmp &= ~0x3;
1938 tmp |= 0x1;
1939 writel(tmp, phy_mmio + MV5_PHY_CTL);
1940 }
1941
1942 tmp = readl(phy_mmio + MV5_PHY_MODE);
1943 tmp &= ~mask;
1944 tmp |= hpriv->signal[port].pre;
1945 tmp |= hpriv->signal[port].amps;
1946 writel(tmp, phy_mmio + MV5_PHY_MODE);
1947}
1948
1949
1950#undef ZERO
1951#define ZERO(reg) writel(0, port_mmio + (reg))
1952static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1953 unsigned int port)
1954{
1955 void __iomem *port_mmio = mv_port_base(mmio, port);
1956
1957 /*
1958 * The datasheet warns against setting ATA_RST when EDMA is active
1959 * (but doesn't say what the problem might be). So we first try
1960 * to disable the EDMA engine before doing the ATA_RST operation.
1961 */
e12bef50 1962 mv_reset_channel(hpriv, mmio, port);
1963
1964 ZERO(0x028); /* command */
1965 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1966 ZERO(0x004); /* timer */
1967 ZERO(0x008); /* irq err cause */
1968 ZERO(0x00c); /* irq err mask */
1969 ZERO(0x010); /* rq bah */
1970 ZERO(0x014); /* rq inp */
1971 ZERO(0x018); /* rq outp */
1972 ZERO(0x01c); /* respq bah */
1973 ZERO(0x024); /* respq outp */
1974 ZERO(0x020); /* respq inp */
1975 ZERO(0x02c); /* test control */
1976 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1977}
1978#undef ZERO
1979
1980#define ZERO(reg) writel(0, hc_mmio + (reg))
1981static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1982 unsigned int hc)
47c2b677 1983{
c9d39130
JG
1984 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1985 u32 tmp;
1986
1987 ZERO(0x00c);
1988 ZERO(0x010);
1989 ZERO(0x014);
1990 ZERO(0x018);
1991
1992 tmp = readl(hc_mmio + 0x20);
1993 tmp &= 0x1c1c1c1c;
1994 tmp |= 0x03030303;
1995 writel(tmp, hc_mmio + 0x20);
1996}
1997#undef ZERO
1998
1999static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2000 unsigned int n_hc)
2001{
2002 unsigned int hc, port;
2003
2004 for (hc = 0; hc < n_hc; hc++) {
2005 for (port = 0; port < MV_PORTS_PER_HC; port++)
2006 mv5_reset_hc_port(hpriv, mmio,
2007 (hc * MV_PORTS_PER_HC) + port);
2008
2009 mv5_reset_one_hc(hpriv, mmio, hc);
2010 }
2011
2012 return 0;
2013}
2014
2015#undef ZERO
2016#define ZERO(reg) writel(0, mmio + (reg))
7bb3c529 2017static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
101ffae2 2018{
02a121da 2019 struct mv_host_priv *hpriv = host->private_data;
2020 u32 tmp;
2021
2022 tmp = readl(mmio + MV_PCI_MODE);
2023 tmp &= 0xff00ffff;
2024 writel(tmp, mmio + MV_PCI_MODE);
2025
2026 ZERO(MV_PCI_DISC_TIMER);
2027 ZERO(MV_PCI_MSI_TRIGGER);
2028 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2029 ZERO(HC_MAIN_IRQ_MASK_OFS);
2030 ZERO(MV_PCI_SERR_MASK);
2031 ZERO(hpriv->irq_cause_ofs);
2032 ZERO(hpriv->irq_mask_ofs);
2033 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2034 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2035 ZERO(MV_PCI_ERR_ATTRIBUTE);
2036 ZERO(MV_PCI_ERR_COMMAND);
2037}
2038#undef ZERO
2039
2040static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2041{
2042 u32 tmp;
2043
2044 mv5_reset_flash(hpriv, mmio);
2045
2046 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2047 tmp &= 0x3;
2048 tmp |= (1 << 5) | (1 << 6);
2049 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2050}
2051
2052/**
2053 * mv6_reset_hc - Perform the 6xxx global soft reset
2054 * @mmio: base address of the HBA
2055 *
2056 * This routine only applies to 6xxx parts.
2057 *
2058 * LOCKING:
2059 * Inherited from caller.
2060 */
2061static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2062 unsigned int n_hc)
2063{
2064 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2065 int i, rc = 0;
2066 u32 t;
2067
2068 /* Following procedure defined in PCI "main command and status
2069 * register" table.
2070 */
2071 t = readl(reg);
2072 writel(t | STOP_PCI_MASTER, reg);
2073
2074 for (i = 0; i < 1000; i++) {
2075 udelay(1);
2076 t = readl(reg);
2dcb407e 2077 if (PCI_MASTER_EMPTY & t)
101ffae2 2078 break;
2079 }
2080 if (!(PCI_MASTER_EMPTY & t)) {
2081 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2082 rc = 1;
2083 goto done;
2084 }
2085
2086 /* set reset */
2087 i = 5;
2088 do {
2089 writel(t | GLOB_SFT_RST, reg);
2090 t = readl(reg);
2091 udelay(1);
2092 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2093
2094 if (!(GLOB_SFT_RST & t)) {
2095 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2096 rc = 1;
2097 goto done;
2098 }
2099
2100 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2101 i = 5;
2102 do {
2103 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2104 t = readl(reg);
2105 udelay(1);
2106 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2107
2108 if (GLOB_SFT_RST & t) {
2109 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2110 rc = 1;
2111 }
2112 /*
2113 * Temporary: wait 3 seconds before port-probing can happen,
2114 * so that we don't miss finding sleepy SilXXXX port-multipliers.
2115 * This can go away once hotplug is fully/correctly implemented.
2116 */
2117 if (rc == 0)
2118 msleep(3000);
2119done:
2120 return rc;
2121}
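/*
 * Sequence recap for mv6_reset_hc(): quiesce the PCI master and poll
 * up to ~1 ms for PCI_MASTER_EMPTY, assert GLOB_SFT_RST, then deassert
 * it together with STOP_PCI_MASTER -- re-enabling the master is
 * deliberate even though the spec omits it.
 */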
2122
47c2b677 2123static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2124 void __iomem *mmio)
2125{
2126 void __iomem *port_mmio;
2127 u32 tmp;
2128
2129 tmp = readl(mmio + MV_RESET_CFG);
2130 if ((tmp & (1 << 0)) == 0) {
47c2b677 2131 hpriv->signal[idx].amps = 0x7 << 8;
2132 hpriv->signal[idx].pre = 0x1 << 5;
2133 return;
2134 }
2135
2136 port_mmio = mv_port_base(mmio, idx);
2137 tmp = readl(port_mmio + PHY_MODE2);
2138
2139 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2140 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2141}
2142
47c2b677 2143static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2144{
47c2b677 2145 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2146}
2147
c9d39130 2148static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2149 unsigned int port)
bca1c4eb 2150{
2151 void __iomem *port_mmio = mv_port_base(mmio, port);
2152
bca1c4eb 2153 u32 hp_flags = hpriv->hp_flags;
2154 int fix_phy_mode2 =
2155 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2156 int fix_phy_mode4 =
2157 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2158 u32 m2, tmp;
2159
2160 if (fix_phy_mode2) {
2161 m2 = readl(port_mmio + PHY_MODE2);
2162 m2 &= ~(1 << 16);
2163 m2 |= (1 << 31);
2164 writel(m2, port_mmio + PHY_MODE2);
2165
2166 udelay(200);
2167
2168 m2 = readl(port_mmio + PHY_MODE2);
2169 m2 &= ~((1 << 16) | (1 << 31));
2170 writel(m2, port_mmio + PHY_MODE2);
2171
2172 udelay(200);
2173 }
2174
2175 /* who knows what this magic does */
2176 tmp = readl(port_mmio + PHY_MODE3);
2177 tmp &= ~0x7F800000;
2178 tmp |= 0x2A800000;
2179 writel(tmp, port_mmio + PHY_MODE3);
2180
2181 if (fix_phy_mode4) {
47c2b677 2182 u32 m4;
2183
2184 m4 = readl(port_mmio + PHY_MODE4);
2185
2186 if (hp_flags & MV_HP_ERRATA_60X1B2)
e12bef50 2187 tmp = readl(port_mmio + PHY_MODE3);
bca1c4eb 2188
e12bef50 2189 /* workaround for errata FEr SATA#10 (part 1) */
2190 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2191
2192 writel(m4, port_mmio + PHY_MODE4);
2193
2194 if (hp_flags & MV_HP_ERRATA_60X1B2)
e12bef50 2195 writel(tmp, port_mmio + PHY_MODE3);
2196 }
2197
2198 /* Revert values of pre-emphasis and signal amps to the saved ones */
2199 m2 = readl(port_mmio + PHY_MODE2);
2200
2201 m2 &= ~MV_M2_PREAMP_MASK;
2202 m2 |= hpriv->signal[port].amps;
2203 m2 |= hpriv->signal[port].pre;
47c2b677 2204 m2 &= ~(1 << 16);
bca1c4eb 2205
2206 /* according to mvSata 3.6.1, some IIE values are fixed */
2207 if (IS_GEN_IIE(hpriv)) {
2208 m2 &= ~0xC30FF01F;
2209 m2 |= 0x0000900F;
2210 }
2211
2212 writel(m2, port_mmio + PHY_MODE2);
2213}
2214
2215/* TODO: use the generic LED interface to configure the SATA Presence */
 2216/* & Activity LEDs on the board */
2217static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2218 void __iomem *mmio)
2219{
2220 return;
2221}
2222
2223static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2224 void __iomem *mmio)
2225{
2226 void __iomem *port_mmio;
2227 u32 tmp;
2228
2229 port_mmio = mv_port_base(mmio, idx);
2230 tmp = readl(port_mmio + PHY_MODE2);
2231
2232 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2233 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2234}
2235
2236#undef ZERO
2237#define ZERO(reg) writel(0, port_mmio + (reg))
2238static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2239 void __iomem *mmio, unsigned int port)
2240{
2241 void __iomem *port_mmio = mv_port_base(mmio, port);
2242
2243 /*
2244 * The datasheet warns against setting ATA_RST when EDMA is active
2245 * (but doesn't say what the problem might be). So we first try
2246 * to disable the EDMA engine before doing the ATA_RST operation.
2247 */
e12bef50 2248 mv_reset_channel(hpriv, mmio, port);
2249
2250 ZERO(0x028); /* command */
2251 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2252 ZERO(0x004); /* timer */
2253 ZERO(0x008); /* irq err cause */
2254 ZERO(0x00c); /* irq err mask */
2255 ZERO(0x010); /* rq bah */
2256 ZERO(0x014); /* rq inp */
2257 ZERO(0x018); /* rq outp */
2258 ZERO(0x01c); /* respq bah */
2259 ZERO(0x024); /* respq outp */
2260 ZERO(0x020); /* respq inp */
2261 ZERO(0x02c); /* test control */
2262 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2263}
2264
2265#undef ZERO
2266
2267#define ZERO(reg) writel(0, hc_mmio + (reg))
2268static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2269 void __iomem *mmio)
2270{
2271 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2272
2273 ZERO(0x00c);
2274 ZERO(0x010);
2275 ZERO(0x014);
2276
2277}
2278
2279#undef ZERO
2280
2281static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2282 void __iomem *mmio, unsigned int n_hc)
2283{
2284 unsigned int port;
2285
2286 for (port = 0; port < hpriv->n_ports; port++)
2287 mv_soc_reset_hc_port(hpriv, mmio, port);
2288
2289 mv_soc_reset_one_hc(hpriv, mmio);
2290
2291 return 0;
2292}
2293
2294static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2295 void __iomem *mmio)
2296{
2297 return;
2298}
2299
2300static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2301{
2302 return;
2303}
2304
2305static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2306{
2307 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2308
2309 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2310 if (want_gen2i)
2311 ifctl |= (1 << 7); /* enable gen2i speed */
2312 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2313}
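/*
 * Usage within this file: mv_reset_channel() calls
 * mv_setup_ifctl(port_mmio, 1) so Gen-II/IIE channels come up at
 * 3.0 Gb/s, and mv_hardreset() falls back to mv_setup_ifctl(..., 0)
 * to force 1.5 Gb/s when a link repeatedly refuses to come up
 * (part 2 of the FEr SATA#10 workaround).
 */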
2314
2315/*
2316 * Caller must ensure that EDMA is not active,
2317 * by first doing mv_stop_edma() where needed.
2318 */
e12bef50 2319static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2320 unsigned int port_no)
2321{
2322 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2323
0d8be5cb 2324 mv_stop_edma_engine(port_mmio);
2325 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2326
2327 if (!IS_GEN_I(hpriv)) {
2328 /* Enable 3.0gb/s link speed */
2329 mv_setup_ifctl(port_mmio, 1);
c9d39130 2330 }
2331 /*
2332 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2333 * link, and physical layers. It resets all SATA interface registers
2334 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
c9d39130 2335 */
2336 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2337 udelay(25); /* allow reset propagation */
2338 writelfl(0, port_mmio + EDMA_CMD_OFS);
2339
2340 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2341
ee9ccdf7 2342 if (IS_GEN_I(hpriv))
2343 mdelay(1);
2344}
2345
e49856d8 2346static void mv_pmp_select(struct ata_port *ap, int pmp)
20f733e7 2347{
2348 if (sata_pmp_supported(ap)) {
2349 void __iomem *port_mmio = mv_ap_base(ap);
2350 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2351 int old = reg & 0xf;
22374677 2352
2353 if (old != pmp) {
2354 reg = (reg & ~0xf) | pmp;
2355 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2356 }
22374677 2357 }
2358}
2359
2360static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2361 unsigned long deadline)
22374677 2362{
2363 mv_pmp_select(link->ap, sata_srst_pmp(link));
2364 return sata_std_hardreset(link, class, deadline);
2365}
bdd4ddde 2366
2367static int mv_softreset(struct ata_link *link, unsigned int *class,
2368 unsigned long deadline)
2369{
2370 mv_pmp_select(link->ap, sata_srst_pmp(link));
2371 return ata_sff_softreset(link, class, deadline);
2372}
2373
cc0680a5 2374static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2375 unsigned long deadline)
31961943 2376{
cc0680a5 2377 struct ata_port *ap = link->ap;
bdd4ddde 2378 struct mv_host_priv *hpriv = ap->host->private_data;
b562468c 2379 struct mv_port_priv *pp = ap->private_data;
f351b2d6 2380 void __iomem *mmio = hpriv->base;
2381 int rc, attempts = 0, extra = 0;
2382 u32 sstatus;
2383 bool online;
31961943 2384
e12bef50 2385 mv_reset_channel(hpriv, mmio, ap->port_no);
b562468c 2386 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
bdd4ddde 2387
2388 /* Workaround for errata FEr SATA#10 (part 2) */
2389 do {
2390 const unsigned long *timing =
2391 sata_ehc_deb_timing(&link->eh_context);
bdd4ddde 2392
2393 rc = sata_link_hardreset(link, timing, deadline + extra,
2394 &online, NULL);
2395 if (rc)
0d8be5cb 2396 return rc;
2397 sata_scr_read(link, SCR_STATUS, &sstatus);
2398 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2399 /* Force 1.5gb/s link speed and try again */
2400 mv_setup_ifctl(mv_ap_base(ap), 0);
2401 if (time_after(jiffies + HZ, deadline))
2402 extra = HZ; /* only extend it once, max */
2403 }
2404 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
bdd4ddde 2405
17c5aab5 2406 return rc;
2407}
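/*
 * The SStatus values tested above decode per the standard SCR0 layout
 * (DET in bits 3:0, SPD in bits 7:4, IPM in bits 11:8): 0x121 means
 * device presence was detected but phy communication never came up,
 * 0x113 is an established Gen-1 link, and 0x123 an established Gen-2
 * link.
 */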
2408
2409static void mv_eh_freeze(struct ata_port *ap)
2410{
f351b2d6 2411 struct mv_host_priv *hpriv = ap->host->private_data;
2412 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2413 u32 tmp, mask;
2414 unsigned int shift;
2415
2416 /* FIXME: handle coalescing completion events properly */
2417
2418 shift = ap->port_no * 2;
2419 if (hc > 0)
2420 shift++;
2421
2422 mask = 0x3 << shift;
2423
2424 /* disable assertion of portN err, done events */
2425 tmp = readl(hpriv->main_mask_reg_addr);
2426 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2427}
2428
2429static void mv_eh_thaw(struct ata_port *ap)
2430{
2431 struct mv_host_priv *hpriv = ap->host->private_data;
2432 void __iomem *mmio = hpriv->base;
2433 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2434 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2435 void __iomem *port_mmio = mv_ap_base(ap);
2436 u32 tmp, mask, hc_irq_cause;
2437 unsigned int shift, hc_port_no = ap->port_no;
2438
2439 /* FIXME: handle coalescing completion events properly */
2440
2441 shift = ap->port_no * 2;
2442 if (hc > 0) {
2443 shift++;
2444 hc_port_no -= 4;
2445 }
2446
2447 mask = 0x3 << shift;
2448
2449 /* clear EDMA errors on this port */
2450 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2451
2452 /* clear pending irq events */
2453 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2454 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2455 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2456 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2457
2458 /* enable assertion of portN err, done events */
2459 tmp = readl(hpriv->main_mask_reg_addr);
2460 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2461}
2462
2463/**
2464 * mv_port_init - Perform some early initialization on a single port.
2465 * @port: libata data structure storing shadow register addresses
2466 * @port_mmio: base address of the port
2467 *
2468 * Initialize shadow register mmio addresses, clear outstanding
2469 * interrupts on the port, and unmask interrupts for the future
2470 * start of the port.
2471 *
2472 * LOCKING:
2473 * Inherited from caller.
2474 */
31961943 2475static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2476{
0d5ff566 2477 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2478 unsigned serr_ofs;
2479
8b260248 2480 /* PIO related setup
2481 */
2482 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2483 port->error_addr =
2484 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2485 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2486 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2487 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2488 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2489 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2490 port->status_addr =
2491 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2492 /* special case: control/altstatus doesn't have ATA_REG_ address */
2493 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2494
2495 /* unused: */
8d9db2d2 2496 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2497
2498 /* Clear any currently outstanding port interrupt conditions */
2499 serr_ofs = mv_scr_offset(SCR_ERROR);
2500 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2501 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2502
2503 /* unmask all non-transient EDMA error interrupts */
2504 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2505
8b260248 2506 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2507 readl(port_mmio + EDMA_CFG_OFS),
2508 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2509 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2510}
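/*
 * Layout behind the shadow-register setup above: the SHD_BLK region
 * exposes each taskfile register in its own 32-bit slot, so with
 * ATA_REG_DATA == 0 the data register lands at shd_base + 0x00, the
 * error/feature pair at shd_base + 0x04, and so on up to
 * status/command at shd_base + 0x1c (ATA_REG_STATUS == 7).
 */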
2511
4447d351 2512static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2513{
2514 struct pci_dev *pdev = to_pci_dev(host->dev);
2515 struct mv_host_priv *hpriv = host->private_data;
2516 u32 hp_flags = hpriv->hp_flags;
2517
5796d1c4 2518 switch (board_idx) {
2519 case chip_5080:
2520 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2521 hp_flags |= MV_HP_GEN_I;
47c2b677 2522
44c10138 2523 switch (pdev->revision) {
2524 case 0x1:
2525 hp_flags |= MV_HP_ERRATA_50XXB0;
2526 break;
2527 case 0x3:
2528 hp_flags |= MV_HP_ERRATA_50XXB2;
2529 break;
2530 default:
2531 dev_printk(KERN_WARNING, &pdev->dev,
2532 "Applying 50XXB2 workarounds to unknown rev\n");
2533 hp_flags |= MV_HP_ERRATA_50XXB2;
2534 break;
2535 }
2536 break;
2537
2538 case chip_504x:
2539 case chip_508x:
47c2b677 2540 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2541 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2542
44c10138 2543 switch (pdev->revision) {
2544 case 0x0:
2545 hp_flags |= MV_HP_ERRATA_50XXB0;
2546 break;
2547 case 0x3:
2548 hp_flags |= MV_HP_ERRATA_50XXB2;
2549 break;
2550 default:
2551 dev_printk(KERN_WARNING, &pdev->dev,
2552 "Applying B2 workarounds to unknown rev\n");
2553 hp_flags |= MV_HP_ERRATA_50XXB2;
2554 break;
2555 }
2556 break;
2557
2558 case chip_604x:
2559 case chip_608x:
47c2b677 2560 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2561 hp_flags |= MV_HP_GEN_II;
47c2b677 2562
44c10138 2563 switch (pdev->revision) {
2564 case 0x7:
2565 hp_flags |= MV_HP_ERRATA_60X1B2;
2566 break;
2567 case 0x9:
2568 hp_flags |= MV_HP_ERRATA_60X1C0;
2569 break;
2570 default:
2571 dev_printk(KERN_WARNING, &pdev->dev,
2572 "Applying B2 workarounds to unknown rev\n");
2573 hp_flags |= MV_HP_ERRATA_60X1B2;
2574 break;
2575 }
2576 break;
2577
e4e7b892 2578 case chip_7042:
02a121da 2579 hp_flags |= MV_HP_PCIE;
2580 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2581 (pdev->device == 0x2300 || pdev->device == 0x2310))
2582 {
2583 /*
2584 * Highpoint RocketRAID PCIe 23xx series cards:
2585 *
2586 * Unconfigured drives are treated as "Legacy"
2587 * by the BIOS, and it overwrites sector 8 with
2588 * a "Lgcy" metadata block prior to Linux boot.
2589 *
2590 * Configured drives (RAID or JBOD) leave sector 8
2591 * alone, but instead overwrite a high numbered
2592 * sector for the RAID metadata. This sector can
2593 * be determined exactly, by truncating the physical
2594 * drive capacity to a nice even GB value.
2595 *
2596 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2597 *
2598 * Warn the user, lest they think we're just buggy.
2599 */
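			/* Worked example: a drive reporting 976773168
			 * sectors (0x3a386030) keeps its RAID metadata
			 * at 0x3a386030 & ~0xfffff = 0x3a300000, i.e.
			 * the capacity rounded down to a 2^20-sector
			 * boundary. */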
2600 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2601 " BIOS CORRUPTS DATA on all attached drives,"
2602 " regardless of if/how they are configured."
2603 " BEWARE!\n");
2604 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2605 " use sectors 8-9 on \"Legacy\" drives,"
2606 " and avoid the final two gigabytes on"
2607 " all RocketRAID BIOS initialized drives.\n");
306b30f7 2608 }
2609 case chip_6042:
2610 hpriv->ops = &mv6xxx_ops;
2611 hp_flags |= MV_HP_GEN_IIE;
2612
44c10138 2613 switch (pdev->revision) {
2614 case 0x0:
2615 hp_flags |= MV_HP_ERRATA_XX42A0;
2616 break;
2617 case 0x1:
2618 hp_flags |= MV_HP_ERRATA_60X1C0;
2619 break;
2620 default:
2621 dev_printk(KERN_WARNING, &pdev->dev,
2622 "Applying 60X1C0 workarounds to unknown rev\n");
2623 hp_flags |= MV_HP_ERRATA_60X1C0;
2624 break;
2625 }
2626 break;
2627 case chip_soc:
2628 hpriv->ops = &mv_soc_ops;
2629 hp_flags |= MV_HP_ERRATA_60X1C0;
2630 break;
e4e7b892 2631
bca1c4eb 2632 default:
f351b2d6 2633 dev_printk(KERN_ERR, host->dev,
5796d1c4 2634 "BUG: invalid board index %u\n", board_idx);
2635 return 1;
2636 }
2637
2638 hpriv->hp_flags = hp_flags;
2639 if (hp_flags & MV_HP_PCIE) {
2640 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2641 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2642 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2643 } else {
2644 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2645 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2646 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2647 }
2648
2649 return 0;
2650}
2651
05b308e1 2652/**
47c2b677 2653 * mv_init_host - Perform some early initialization of the host.
2654 * @host: ATA host to initialize
2655 * @board_idx: controller index
2656 *
2657 * If possible, do an early global reset of the host. Then do
2658 * our port init and clear/unmask all/relevant host interrupts.
2659 *
2660 * LOCKING:
2661 * Inherited from caller.
2662 */
4447d351 2663static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2664{
2665 int rc = 0, n_hc, port, hc;
4447d351 2666 struct mv_host_priv *hpriv = host->private_data;
f351b2d6 2667 void __iomem *mmio = hpriv->base;
47c2b677 2668
4447d351 2669 rc = mv_chip_id(host, board_idx);
bca1c4eb 2670 if (rc)
2671 goto done;
2672
2673 if (HAS_PCI(host)) {
2674 hpriv->main_cause_reg_addr = hpriv->base +
2675 HC_MAIN_IRQ_CAUSE_OFS;
2676 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2677 } else {
2678 hpriv->main_cause_reg_addr = hpriv->base +
2679 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2680 hpriv->main_mask_reg_addr = hpriv->base +
2681 HC_SOC_MAIN_IRQ_MASK_OFS;
2682 }
2683 /* global interrupt mask */
2684 writel(0, hpriv->main_mask_reg_addr);
bca1c4eb 2685
4447d351 2686 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2687
4447d351 2688 for (port = 0; port < host->n_ports; port++)
47c2b677 2689 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2690
c9d39130 2691 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2692 if (rc)
20f733e7 2693 goto done;
20f733e7 2694
522479fb 2695 hpriv->ops->reset_flash(hpriv, mmio);
7bb3c529 2696 hpriv->ops->reset_bus(host, mmio);
47c2b677 2697 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2698
4447d351 2699 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2700 struct ata_port *ap = host->ports[port];
2a47ce06 2701 void __iomem *port_mmio = mv_port_base(mmio, port);
2702
2703 mv_port_init(&ap->ioaddr, port_mmio);
2704
7bb3c529 2705#ifdef CONFIG_PCI
2706 if (HAS_PCI(host)) {
2707 unsigned int offset = port_mmio - mmio;
2708 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2709 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2710 }
7bb3c529 2711#endif
2712 }
2713
2714 for (hc = 0; hc < n_hc; hc++) {
2715 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2716
2717 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2718 "(before clear)=0x%08x\n", hc,
2719 readl(hc_mmio + HC_CFG_OFS),
2720 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2721
2722 /* Clear any currently outstanding hc interrupt conditions */
2723 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2724 }
2725
2726 if (HAS_PCI(host)) {
2727 /* Clear any currently outstanding host interrupt conditions */
2728 writelfl(0, mmio + hpriv->irq_cause_ofs);
31961943 2729
2730 /* and unmask interrupt generation for host regs */
2731 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2732 if (IS_GEN_I(hpriv))
2733 writelfl(~HC_MAIN_MASKED_IRQS_5,
2734 hpriv->main_mask_reg_addr);
2735 else
2736 writelfl(~HC_MAIN_MASKED_IRQS,
2737 hpriv->main_mask_reg_addr);
2738
2739 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2740 "PCI int cause/mask=0x%08x/0x%08x\n",
2741 readl(hpriv->main_cause_reg_addr),
2742 readl(hpriv->main_mask_reg_addr),
2743 readl(mmio + hpriv->irq_cause_ofs),
2744 readl(mmio + hpriv->irq_mask_ofs));
2745 } else {
2746 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2747 hpriv->main_mask_reg_addr);
2748 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2749 readl(hpriv->main_cause_reg_addr),
2750 readl(hpriv->main_mask_reg_addr));
2751 }
2752done:
2753 return rc;
2754}
fb621e2f 2755
2756static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2757{
2758 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2759 MV_CRQB_Q_SZ, 0);
2760 if (!hpriv->crqb_pool)
2761 return -ENOMEM;
2762
2763 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2764 MV_CRPB_Q_SZ, 0);
2765 if (!hpriv->crpb_pool)
2766 return -ENOMEM;
2767
2768 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2769 MV_SG_TBL_SZ, 0);
2770 if (!hpriv->sg_tbl_pool)
2771 return -ENOMEM;
2772
2773 return 0;
2774}
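/*
 * These pools back the per-port command request (CRQB) and response
 * (CRPB) rings plus the scatter/gather tables; dmam_pool_create() is
 * the managed variant, so the pools are released automatically on
 * device teardown and no explicit cleanup path is needed here.
 */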
2775
2776static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
2777 struct mbus_dram_target_info *dram)
2778{
2779 int i;
2780
2781 for (i = 0; i < 4; i++) {
2782 writel(0, hpriv->base + WINDOW_CTRL(i));
2783 writel(0, hpriv->base + WINDOW_BASE(i));
2784 }
2785
2786 for (i = 0; i < dram->num_cs; i++) {
2787 struct mbus_dram_window *cs = dram->cs + i;
2788
2789 writel(((cs->size - 1) & 0xffff0000) |
2790 (cs->mbus_attr << 8) |
2791 (dram->mbus_dram_target_id << 4) | 1,
2792 hpriv->base + WINDOW_CTRL(i));
2793 writel(cs->base, hpriv->base + WINDOW_BASE(i));
2794 }
2795}
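/*
 * Worked example of the WINDOW_CTRL packing above, using made-up
 * window parameters: a 256 MB chip-select with mbus_attr 0xe on DRAM
 * target 0 packs as ((0x10000000 - 1) & 0xffff0000) | (0xe << 8) |
 * (0 << 4) | 1 = 0x0fff0e01 -- size in the top 16 bits, attribute in
 * bits 15:8, target id in bits 7:4, enable in bit 0.
 */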
2796
2797/**
 2798 * mv_platform_probe - handle a positive probe of an SoC Marvell
2799 * host
2800 * @pdev: platform device found
2801 *
2802 * LOCKING:
2803 * Inherited from caller.
2804 */
2805static int mv_platform_probe(struct platform_device *pdev)
2806{
2807 static int printed_version;
2808 const struct mv_sata_platform_data *mv_platform_data;
2809 const struct ata_port_info *ppi[] =
2810 { &mv_port_info[chip_soc], NULL };
2811 struct ata_host *host;
2812 struct mv_host_priv *hpriv;
2813 struct resource *res;
2814 int n_ports, rc;
20f733e7 2815
2816 if (!printed_version++)
2817 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
bca1c4eb 2818
2819 /*
 2820 * Simple resource validation ...
2821 */
2822 if (unlikely(pdev->num_resources != 2)) {
2823 dev_err(&pdev->dev, "invalid number of resources\n");
2824 return -EINVAL;
2825 }
2826
2827 /*
2828 * Get the register base first
2829 */
2830 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2831 if (res == NULL)
2832 return -EINVAL;
2833
2834 /* allocate host */
2835 mv_platform_data = pdev->dev.platform_data;
2836 n_ports = mv_platform_data->n_ports;
2837
2838 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2839 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2840
2841 if (!host || !hpriv)
2842 return -ENOMEM;
2843 host->private_data = hpriv;
2844 hpriv->n_ports = n_ports;
2845
2846 host->iomap = NULL;
2847 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2848 res->end - res->start + 1);
2849 hpriv->base -= MV_SATAHC0_REG_BASE;
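	/* The platform resource maps the SATAHC register block itself,
	 * while the offset helpers in this driver expect the chip-wide
	 * base, hence backing the pointer up by MV_SATAHC0_REG_BASE. */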
2850
2851 /*
2852 * (Re-)program MBUS remapping windows if we are asked to.
2853 */
2854 if (mv_platform_data->dram != NULL)
2855 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2856
2857 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2858 if (rc)
2859 return rc;
2860
2861 /* initialize adapter */
2862 rc = mv_init_host(host, chip_soc);
2863 if (rc)
2864 return rc;
2865
2866 dev_printk(KERN_INFO, &pdev->dev,
2867 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2868 host->n_ports);
2869
2870 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2871 IRQF_SHARED, &mv6_sht);
2872}
2873
 2874/**
2876 * mv_platform_remove - unplug a platform interface
2877 * @pdev: platform device
2878 *
2879 * A platform bus SATA device has been unplugged. Perform the needed
2880 * cleanup. Also called on module unload for any active devices.
2881 */
2882static int __devexit mv_platform_remove(struct platform_device *pdev)
2883{
2884 struct device *dev = &pdev->dev;
2885 struct ata_host *host = dev_get_drvdata(dev);
2886
2887 ata_host_detach(host);
f351b2d6 2888 return 0;
20f733e7
BR
2889}
2890
2891static struct platform_driver mv_platform_driver = {
2892 .probe = mv_platform_probe,
2893 .remove = __devexit_p(mv_platform_remove),
2894 .driver = {
2895 .name = DRV_NAME,
2896 .owner = THIS_MODULE,
2897 },
2898};
2899
2900
7bb3c529 2901#ifdef CONFIG_PCI
2902static int mv_pci_init_one(struct pci_dev *pdev,
2903 const struct pci_device_id *ent);
2904
2905
2906static struct pci_driver mv_pci_driver = {
2907 .name = DRV_NAME,
2908 .id_table = mv_pci_tbl,
f351b2d6 2909 .probe = mv_pci_init_one,
2910 .remove = ata_pci_remove_one,
2911};
2912
2913/*
2914 * module options
2915 */
2916static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2917
2918
2919/* move to PCI layer or libata core? */
2920static int pci_go_64(struct pci_dev *pdev)
2921{
2922 int rc;
2923
2924 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2925 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2926 if (rc) {
2927 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2928 if (rc) {
2929 dev_printk(KERN_ERR, &pdev->dev,
2930 "64-bit DMA enable failed\n");
2931 return rc;
2932 }
2933 }
2934 } else {
2935 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2936 if (rc) {
2937 dev_printk(KERN_ERR, &pdev->dev,
2938 "32-bit DMA enable failed\n");
2939 return rc;
2940 }
2941 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2942 if (rc) {
2943 dev_printk(KERN_ERR, &pdev->dev,
2944 "32-bit consistent DMA enable failed\n");
2945 return rc;
2946 }
2947 }
2948
2949 return rc;
2950}
2951
2952/**
2953 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2954 * @host: ATA host to print info about
2955 *
2956 * FIXME: complete this.
2957 *
2958 * LOCKING:
2959 * Inherited from caller.
2960 */
4447d351 2961static void mv_print_info(struct ata_host *host)
31961943 2962{
2963 struct pci_dev *pdev = to_pci_dev(host->dev);
2964 struct mv_host_priv *hpriv = host->private_data;
44c10138 2965 u8 scc;
c1e4fe71 2966 const char *scc_s, *gen;
2967
2968 /* Use this to determine the HW stepping of the chip so we know
2969 * what errata to workaround
2970 */
2971 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2972 if (scc == 0)
2973 scc_s = "SCSI";
2974 else if (scc == 0x01)
2975 scc_s = "RAID";
2976 else
2977 scc_s = "?";
2978
2979 if (IS_GEN_I(hpriv))
2980 gen = "I";
2981 else if (IS_GEN_II(hpriv))
2982 gen = "II";
2983 else if (IS_GEN_IIE(hpriv))
2984 gen = "IIE";
2985 else
2986 gen = "?";
31961943 2987
a9524a76 2988 dev_printk(KERN_INFO, &pdev->dev,
2989 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2990 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2991 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2992}
2993
05b308e1 2994/**
f351b2d6 2995 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
2996 * @pdev: PCI device found
2997 * @ent: PCI device ID entry for the matched host
2998 *
2999 * LOCKING:
3000 * Inherited from caller.
3001 */
3002static int mv_pci_init_one(struct pci_dev *pdev,
3003 const struct pci_device_id *ent)
20f733e7 3004{
2dcb407e 3005 static int printed_version;
20f733e7 3006 unsigned int board_idx = (unsigned int)ent->driver_data;
3007 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3008 struct ata_host *host;
3009 struct mv_host_priv *hpriv;
3010 int n_ports, rc;
20f733e7 3011
3012 if (!printed_version++)
3013 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 3014
3015 /* allocate host */
3016 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3017
3018 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3019 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3020 if (!host || !hpriv)
3021 return -ENOMEM;
3022 host->private_data = hpriv;
f351b2d6 3023 hpriv->n_ports = n_ports;
3024
3025 /* acquire resources */
3026 rc = pcim_enable_device(pdev);
3027 if (rc)
20f733e7 3028 return rc;
20f733e7 3029
3030 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3031 if (rc == -EBUSY)
24dc5f33 3032 pcim_pin_device(pdev);
0d5ff566 3033 if (rc)
24dc5f33 3034 return rc;
4447d351 3035 host->iomap = pcim_iomap_table(pdev);
f351b2d6 3036 hpriv->base = host->iomap[MV_PRIMARY_BAR];
20f733e7 3037
3038 rc = pci_go_64(pdev);
3039 if (rc)
3040 return rc;
3041
3042 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3043 if (rc)
3044 return rc;
3045
20f733e7 3046 /* initialize adapter */
4447d351 3047 rc = mv_init_host(host, board_idx);
3048 if (rc)
3049 return rc;
20f733e7 3050
31961943 3051 /* Enable interrupts */
6a59dcf8 3052 if (msi && pci_enable_msi(pdev))
31961943 3053 pci_intx(pdev, 1);
20f733e7 3054
31961943 3055 mv_dump_pci_cfg(pdev, 0x68);
4447d351 3056 mv_print_info(host);
20f733e7 3057
4447d351 3058 pci_set_master(pdev);
ea8b4db9 3059 pci_try_set_mwi(pdev);
4447d351 3060 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 3061 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
20f733e7 3062}
7bb3c529 3063#endif
20f733e7 3064
3065static int mv_platform_probe(struct platform_device *pdev);
3066static int __devexit mv_platform_remove(struct platform_device *pdev);
3067
3068static int __init mv_init(void)
3069{
3070 int rc = -ENODEV;
3071#ifdef CONFIG_PCI
3072 rc = pci_register_driver(&mv_pci_driver);
3073 if (rc < 0)
3074 return rc;
3075#endif
3076 rc = platform_driver_register(&mv_platform_driver);
3077
3078#ifdef CONFIG_PCI
3079 if (rc < 0)
3080 pci_unregister_driver(&mv_pci_driver);
3081#endif
3082 return rc;
3083}
3084
3085static void __exit mv_exit(void)
3086{
7bb3c529 3087#ifdef CONFIG_PCI
20f733e7 3088 pci_unregister_driver(&mv_pci_driver);
7bb3c529 3089#endif
f351b2d6 3090 platform_driver_unregister(&mv_platform_driver);
3091}
3092
3093MODULE_AUTHOR("Brett Russ");
3094MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3095MODULE_LICENSE("GPL");
3096MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3097MODULE_VERSION(DRV_VERSION);
17c5aab5 3098MODULE_ALIAS("platform:" DRV_NAME);
20f733e7 3099
7bb3c529 3100#ifdef CONFIG_PCI
3101module_param(msi, int, 0444);
3102MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
7bb3c529 3103#endif
ddef9bb3 3104
3105module_init(mv_init);
3106module_exit(mv_exit);