/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead saved by interrupt mitigation is not worth the added
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"

enum {
	/* BARs are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
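	/* Checking the arithmetic: 32 CRQBs * 32B + 32 CRPBs * 8B +
	 * 176 ePRDs * 16B = 1024 + 256 + 2816 = 4096 bytes, so each
	 * port's private DMA area is exactly one 4KB page, as the
	 * comment above promises.
	 */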

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x70a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);

static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
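
/* The read-back above is what forces the write out of any PCI posting
 * buffers before the caller proceeds; mv_start_dma(), for instance,
 * relies on this when it clears EDMA_ERR_IRQ_CAUSE_OFS immediately
 * before setting EDMA_EN.
 */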

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
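
/* Worked example of the address math above: host port 5 is hardport 1
 * on HC 1, so its port registers start at
 * 0x20000 + 1*0x10000 (HC 1) + 0x2000 (arbiter) + 1*0x2000 = 0x34000
 * from the BAR 0 base.
 */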

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
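
/* Two idioms above are worth spelling out.  "(dma >> 16) >> 16" yields
 * the high 32 bits of the address without a shift-by-32, which would be
 * undefined when dma_addr_t is only 32 bits wide.  And the queue index
 * is packed below the BASE_LO address bits: with req_idx == 3, for
 * example, index == 3 << EDMA_REQ_Q_PTR_SHIFT == 0x60, so the slot
 * number lands in bits 9:5 while EDMA_REQ_Q_BASE_LO_MASK keeps the
 * ring's base address in bits 31:10 of the same register.
 */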

/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
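
/* The poll loop above gives the engine up to 1000 * 100us, roughly
 * 100ms, to drop EDMA_EN before we give up and return -EIO.
 */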

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
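
/* With libata's SCR numbering (SCR_STATUS == 0, SCR_ERROR == 1,
 * SCR_CONTROL == 2), the computed offsets come out to 0x300, 0x304
 * and 0x308 respectively, while SCR_ACTIVE sits apart at 0x350.
 */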

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
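
/* For reference, the resulting carve-up of the single coherent
 * allocation is: 0x000-0x3ff CRQB ring, 0x400-0x4ff CRPB ring,
 * 0x500-0xfff ePRD table.  Since the allocation comes back
 * page-aligned in practice, the hardware's 1KB (CRQB) and 256B (CRPB)
 * alignment requirements checked by the WARN_ONs in mv_set_edma_ptrs()
 * hold automatically.
 */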

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
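
/* Worked example of the splitting above: a 0x8000-byte segment whose
 * bus address ends in 0xf000 becomes two ePRDs of 0x1000 and 0x7000
 * bytes, and a full 0x10000-byte entry is encoded as length 0 by the
 * "& 0xffff".  Splitting can still be needed despite the 0xffff
 * dma_boundary because dma_map_sg() may merge entries (e.g. behind an
 * IOMMU); this doubling is presumably why the scsi_host_templates
 * advertise sg_tablesize = MV_MAX_SG_CT / 2.
 */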

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
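
/* Each 16-bit CRQB word thus carries the register value in its low
 * byte, the shadow-register address just above it (addr << 8), the
 * CRQB_CMD_CS control-select bits, and bit 15 flagging the final word.
 * Packing tf->command with last == 1, for example, yields
 * tf->command | (ATA_REG_CMD << 8) | CRQB_CMD_CS | CRQB_CMD_LAST.
 */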

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this*/

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 words...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected queued command, or NULL if none was active
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
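		/* e.g. port 5 ends up with shift == 11: its PORT0_ERR bit
		 * is read from bit 11 of the main cause register, matching
		 * HC_SHIFT's placement of HC1's ports at bits 9-17 with
		 * bit 8 skipped for PORTS_0_3_COAL_DONE.
		 */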
bdd4ddde
JG
1633 have_err_bits = ((PORT0_ERR << shift) & relevant);
1634
1635 if (unlikely(have_err_bits)) {
1636 struct ata_queued_cmd *qc;
8b260248 1637
9af5c9c9 1638 qc = ata_qc_from_tag(ap, ap->link.active_tag);
bdd4ddde
JG
1639 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1640 continue;
1641
1642 mv_err_intr(ap, qc);
1643 continue;
1644 }
1645
1646 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1647
1648 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1649 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1650 mv_intr_edma(ap);
1651 } else {
1652 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1653 mv_intr_pio(ap);
20f733e7
BR
1654 }
1655 }
1656 VPRINTK("EXIT\n");
1657}

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read-only register to determine if any host
 * controllers have pending interrupts.  If so, call the lower level
 * routine to handle them.  Also check for PCI errors, which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
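
/*
 * Aside (illustration only): the 0xffffffff check in mv_interrupt() is
 * the usual MMIO "device gone" idiom.  Reads from a removed or faulted
 * PCI device return all-ones, so treating that value like "nothing
 * pending" keeps a surprise hot-unplug from wedging the IRQ path.
 */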

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
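
/*
 * Illustration only: assuming the standard libata numbering
 * (SCR_STATUS = 0, SCR_ERROR = 1, SCR_CONTROL = 2), mv5_scr_offset()
 * maps the supported SCR registers onto consecutive 32-bit slots:
 *
 *	SCR_STATUS  -> 0x0
 *	SCR_ERROR   -> 0x4
 *	SCR_CONTROL -> 0x8
 *
 * with everything else collapsing to the 0xffffffffU sentinel that the
 * read/write wrappers below turn into -EINVAL.
 */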

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
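
/*
 * FIXME: the "tmp |= ~(1 << 0)" above sets every bit *except* bit 0,
 * while the matching code in mv5_reset_bus() sets only bit 0; it reads
 * like a typo for "tmp |= (1 << 0)" and deserves a check against the
 * datasheet.
 */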

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
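
/*
 * Aside (illustration only): each ZERO() definition above binds the
 * shorthand to a different base pointer, so within mv5_reset_one_hc(),
 * for example, ZERO(0x00c) expands to exactly
 * writel(0, hc_mmio + 0x00c).  The #define/#undef bracketing keeps one
 * function's shorthand from leaking into the next.
 */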

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
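
/*
 * Illustration only (not driver code): the wait loops in mv6_reset_hc()
 * are variations of one bounded busy-wait on a status bit.  A
 * hypothetical helper in that spirit, for the read-only case:
 */
#if 0
static bool mv_wait_bit(void __iomem *reg, u32 bit, bool want_set, int tries)
{
	while (tries-- > 0) {
		u32 t = readl(reg);

		if (!!(t & bit) == want_set)
			return true;	/* bit reached the desired state */
		udelay(1);
	}
	return false;			/* timed out */
}
#endif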

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

/**
 * mv_phy_reset - Perform eDMA reset followed by COMRESET
 * @ap: ATA channel to manipulate
 * @class: the resulting device class, written back for the caller
 * @deadline: jiffies value by which the reset should complete
 *
 * Part of this is taken from __sata_phy_reset and modified to suit
 * this driver.
 *
 * LOCKING:
 * Inherited from caller.  An older version of this routine avoided
 * sleeping so that it could run at interrupt level; the current code
 * calls msleep() and therefore must be invoked from a context that
 * may sleep (the new-EH reset path).
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
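
/*
 * Aside (illustration only): the 0x301/0x300 SControl writes above
 * drive the DET field defined by the SATA spec.  DET=1 (0x301) holds
 * COMRESET asserted on the wire, DET=0 (0x300) releases it, and the
 * polling loop then waits for SStatus.DET to read 3 (device present,
 * PHY communication established) or 0 (nothing attached).
 */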

static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}

static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}

static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
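
/*
 * Aside (illustration only): ata_do_eh() runs the callbacks above in
 * prereset -> softreset -> hardreset -> postreset order, and skips
 * softreset when prereset has flagged ATA_EH_HARDRESET -- which is how
 * mv_prereset() forces a hardreset on a port's first EH pass.
 */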

static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}

static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);		/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8));	/* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
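
/*
 * Aside (illustration only): the taskfile "shadow registers" on this
 * hardware are laid out as 32-bit slots, hence the sizeof(u32) * reg
 * scaling above -- e.g. ATA_REG_NSECT (2) lands at shd_base + 0x08
 * rather than at the byte offsets of a legacy IDE port.
 */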

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310)) {
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: the 7042 shares the 6042 (Gen IIE) setup */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
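
/*
 * Aside (illustration only): the irq_cause_ofs/irq_mask_ofs fields set
 * up above are the indirection that lets mv_pci_error(),
 * mv_reset_pci_bus() and mv_init_host() stay agnostic about whether the
 * chip's error registers sit at the conventional PCI offsets or at the
 * PCIe ones used by the 7042 family.
 */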

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}

/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* The PCI class code tells us whether the BIOS presents the chip
	 * as a plain SCSI controller or as a RAID controller
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_init_one - handle a positive probe of a Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
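
/*
 * Aside (illustration only): the probe path above relies on managed
 * ("devm"/"pcim") resources, which is why mv_init_one() can bail out
 * with a bare "return rc" at every step -- the device enable, the BAR
 * mappings, and the devm_kzalloc()ed private data are all released
 * automatically if probing fails.
 */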

static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
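
/*
 * Usage note: "msi" can be set at module load time, e.g.
 *
 *	modprobe sata_mv msi=1
 *
 * MSI is attempted only when msi=1; if pci_enable_msi() fails the probe
 * falls back to legacy INTx (and with msi=0 it never leaves INTx).
 */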

module_init(mv_init);
module_exit(mv_exit);