sata_mv ncq EH fixes
drivers/ata/sata_mv.c
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple of workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.

*/


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x70a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);

static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

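/*
 * Worked example of the address math above, using only the constants
 * defined earlier in this file: for port 5, mv_hc_from_port() gives
 * hc = 5 >> 2 = 1 and mv_hardport_from_port() gives 5 & 3 = 1, so
 * mv_port_base() resolves to
 *	base + 0x20000		(MV_SATAHC0_REG_BASE)
 *	     + 1 * 0x10000	(SATAHC 1)
 *	     + 0x2000		(skip the arbiter block)
 *	     + 1 * 0x2000	(hard port 1)
 *	= base + 0x34000.
 */
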
static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

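/*
 * A note on the pointer encoding used above: the request queue in/out
 * registers hold the queue base address in their high bits and the
 * 32-entry slot number in bits 9:5 (EDMA_REQ_Q_PTR_SHIFT == 5, matching
 * the 32-byte CRQB size), so for req_idx == 3 the index field is
 * (3 & 0x1f) << 5 == 0x60.  The response queue works the same way with
 * bits 7:3, matching its 8-byte CRPBs.
 */
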
/**
 * mv_start_dma - Enable eDMA engine
 * @base: port base address
 * @hpriv: host private data
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;	/* should be benign for 4-port devices */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

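/*
 * For reference, with libata's SCR numbering (SCR_STATUS, SCR_ERROR,
 * SCR_CONTROL == 0, 1, 2) the mapping above works out to SStatus at
 * 0x300, SError at 0x304, SControl at 0x308, and SActive off on its
 * own at 0x350.
 */
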
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

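/*
 * For clarity, the single coherent allocation above is carved up as
 * follows (sizes per the MV_*_SZ constants): 0x400 bytes of CRQBs at
 * offset 0, 0x100 bytes of CRPBs at 0x400, and the 176-entry ePRD
 * table (0xb00 bytes) at 0x500, totalling MV_PORT_PRIV_DMA_SZ == 4KB
 * per port.
 */
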
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

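/*
 * Worked example of the splitting loop above: a hypothetical segment of
 * sg_len 0x18000 at bus address 0x1fff0 has offset 0xfff0, so the loop
 * emits three ePRDs: 0x10 bytes up to the 64KB line, then 0x10000, then
 * the remaining 0x7ff0.  In practice the 0xffff dma_boundary in the
 * scsi_host_template already keeps segments inside a 64KB window; the
 * loop enforces the same limit on /length/, per the comment at
 * MV_DMA_BOUNDARY.
 */
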
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

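/*
 * Each 16-bit CRQB command word built above carries the register value
 * in bits 7:0, the shadow register address in bits 10:8, the fixed
 * CRQB_CMD_CS field in bits 12:11, and the "last" flag in bit 15.  For
 * example, packing tf->command == 0xc8 (READ DMA) into ATA_REG_CMD (7)
 * with last set yields 0xc8 | (7 << 8) | (0x2 << 11) | (1 << 15),
 * i.e. 0x97c8.
 */
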
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

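/*
 * Producer/consumer recap for the request queue: software owns req_idx
 * (the producer) and bumps it in mv_qc_issue() after filling a CRQB,
 * while the EDMA engine advances the OUT (consumer) pointer as it
 * fetches CRQBs.  The IN-pointer write above doubles as the doorbell
 * that wakes the engine.
 */
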
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command, or NULL if none was active
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */

	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

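/*
 * Layout note for the CRPB 'flags' word consumed above: bits 7:0 mirror
 * the EDMA error-cause bits as a quick per-command error check, while
 * bits 15:8 (CRPB_FLAG_STATUS_SHIFT) carry the ATA status register that
 * ac_err_mask() is applied to on completion.
 */
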
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

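/*
 * Example of the per-port bit math above: each port owns an (err, done)
 * bit pair in the main cause register, so port 2 uses shift 4.  Ports
 * on the second HC additionally skip bit 8 (the HC0 coalescing bit);
 * e.g. port 5 yields shift = (5 << 1) + 1 = 11, placing its PORT0_ERR
 * bit at position 11 and PORT0_DONE at 12.
 */
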
bdd4ddde
JG
1660static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1661{
02a121da 1662 struct mv_host_priv *hpriv = host->private_data;
1663 struct ata_port *ap;
1664 struct ata_queued_cmd *qc;
1665 struct ata_eh_info *ehi;
1666 unsigned int i, err_mask, printed = 0;
1667 u32 err_cause;
1668
02a121da 1669 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1670
1671 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1672 err_cause);
1673
1674 DPRINTK("All regs @ PCI error\n");
1675 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1676
02a121da 1677 writelfl(0, mmio + hpriv->irq_cause_ofs);
1678
1679 for (i = 0; i < host->n_ports; i++) {
1680 ap = host->ports[i];
936fd732 1681 if (!ata_link_offline(&ap->link)) {
9af5c9c9 1682 ehi = &ap->link.eh_info;
1683 ata_ehi_clear_desc(ehi);
1684 if (!printed++)
1685 ata_ehi_push_desc(ehi,
1686 "PCI err cause 0x%08x", err_cause);
1687 err_mask = AC_ERR_HOST_BUS;
1688 ehi->action = ATA_EH_HARDRESET;
9af5c9c9 1689 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1690 if (qc)
1691 qc->err_mask |= err_mask;
1692 else
1693 ehi->err_mask |= err_mask;
1694
1695 ata_port_freeze(ap);
1696 }
1697 }
1698}
1699
05b308e1 1700/**
c5d3e45a 1701 * mv_interrupt - Main interrupt event handler
1702 * @irq: unused
1703 * @dev_instance: private data; in this case the host structure
1704 *
1705 * Read the read-only main cause register to determine if any host
1706 * controllers have pending interrupts. If so, call lower level
1707 * routine to handle. Also check for PCI errors which are only
1708 * reported here.
1709 *
8b260248 1710 * LOCKING:
cca3974e 1711 * This routine holds the host lock while processing pending
1712 * interrupts.
1713 */
7d12e780 1714static irqreturn_t mv_interrupt(int irq, void *dev_instance)
20f733e7 1715{
cca3974e 1716 struct ata_host *host = dev_instance;
20f733e7 1717 unsigned int hc, handled = 0, n_hcs;
0d5ff566 1718 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1719 u32 irq_stat;
1720
20f733e7 1721 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1722
1723 /* check the cases where we either have nothing pending or have read
1724 * a bogus register value which can indicate HW removal or PCI fault
1725 */
35177265 1726 if (!irq_stat || (0xffffffffU == irq_stat))
20f733e7 1727 return IRQ_NONE;
20f733e7 1728
1729 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1730 spin_lock(&host->lock);
20f733e7 1731
1732 if (unlikely(irq_stat & PCI_ERR)) {
1733 mv_pci_error(host, mmio);
1734 handled = 1;
1735 goto out_unlock; /* skip all other HC irq handling */
1736 }
1737
1738 for (hc = 0; hc < n_hcs; hc++) {
1739 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1740 if (relevant) {
cca3974e 1741 mv_host_intr(host, relevant, hc);
bdd4ddde 1742 handled = 1;
1743 }
1744 }
615ab953 1745
bdd4ddde 1746out_unlock:
cca3974e 1747 spin_unlock(&host->lock);
1748
1749 return IRQ_RETVAL(handled);
1750}
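/*
 * Editor's sketch (not part of the driver): the all-ones test above is
 * the usual guard against a surprise-removed or faulted PCI device,
 * which reads back 0xffffffff.  Dispatch then slices the cause word
 * into one 9-bit group per host controller (HC0_IRQ_PEND/HC_SHIFT).
 * A hypothetical model, with demo_dispatch_cause an assumed name:
 */
static int demo_dispatch_cause(u32 cause, unsigned int n_hcs)
{
	unsigned int hc, handled = 0;

	if (!cause || cause == 0xffffffffU)
		return 0;	/* nothing pending, or dead device */

	for (hc = 0; hc < n_hcs; hc++)
		if (cause & (0x1ffU << (hc * 9)))	/* 9 bits per HC */
			handled = 1;			/* HC needs service */
	return handled;
}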
1751
1752static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1753{
1754 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1755 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1756
1757 return hc_mmio + ofs;
1758}
1759
1760static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1761{
1762 unsigned int ofs;
1763
1764 switch (sc_reg_in) {
1765 case SCR_STATUS:
1766 case SCR_ERROR:
1767 case SCR_CONTROL:
1768 ofs = sc_reg_in * sizeof(u32);
1769 break;
1770 default:
1771 ofs = 0xffffffffU;
1772 break;
1773 }
1774 return ofs;
1775}
1776
da3dbb17 1777static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
c9d39130 1778{
1779 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1780 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1781 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1782
1783 if (ofs != 0xffffffffU) {
1784 *val = readl(addr + ofs);
1785 return 0;
1786 } else
1787 return -EINVAL;
1788}
1789
da3dbb17 1790static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
c9d39130 1791{
1792 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1793 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1794 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1795
da3dbb17 1796 if (ofs != 0xffffffffU) {
0d5ff566 1797 writelfl(val, addr + ofs);
1798 return 0;
1799 } else
1800 return -EINVAL;
1801}
1802
1803static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1804{
1805 int early_5080;
1806
44c10138 1807 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1808
1809 if (!early_5080) {
1810 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1811 tmp |= (1 << 0);
1812 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1813 }
1814
1815 mv_reset_pci_bus(pdev, mmio);
1816}
1817
1818static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1819{
1820 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1821}
1822
47c2b677 1823static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1824 void __iomem *mmio)
1825{
1826 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1827 u32 tmp;
1828
1829 tmp = readl(phy_mmio + MV5_PHY_MODE);
1830
1831 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1832 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1833}
1834
47c2b677 1835static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 1836{
1837 u32 tmp;
1838
1839 writel(0, mmio + MV_GPIO_PORT_CTL);
1840
1841 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1842
1843 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1844 tmp |= ~(1 << 0);
1845 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1846}
1847
1848static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1849 unsigned int port)
bca1c4eb 1850{
1851 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1852 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1853 u32 tmp;
1854 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1855
1856 if (fix_apm_sq) {
1857 tmp = readl(phy_mmio + MV5_LT_MODE);
1858 tmp |= (1 << 19);
1859 writel(tmp, phy_mmio + MV5_LT_MODE);
1860
1861 tmp = readl(phy_mmio + MV5_PHY_CTL);
1862 tmp &= ~0x3;
1863 tmp |= 0x1;
1864 writel(tmp, phy_mmio + MV5_PHY_CTL);
1865 }
1866
1867 tmp = readl(phy_mmio + MV5_PHY_MODE);
1868 tmp &= ~mask;
1869 tmp |= hpriv->signal[port].pre;
1870 tmp |= hpriv->signal[port].amps;
1871 writel(tmp, phy_mmio + MV5_PHY_MODE);
1872}
1873
1874
1875#undef ZERO
1876#define ZERO(reg) writel(0, port_mmio + (reg))
1877static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1878 unsigned int port)
1879{
1880 void __iomem *port_mmio = mv_port_base(mmio, port);
1881
1882 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1883
1884 mv_channel_reset(hpriv, mmio, port);
1885
1886 ZERO(0x028); /* command */
1887 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1888 ZERO(0x004); /* timer */
1889 ZERO(0x008); /* irq err cause */
1890 ZERO(0x00c); /* irq err mask */
1891 ZERO(0x010); /* rq bah */
1892 ZERO(0x014); /* rq inp */
1893 ZERO(0x018); /* rq outp */
1894 ZERO(0x01c); /* respq bah */
1895 ZERO(0x024); /* respq outp */
1896 ZERO(0x020); /* respq inp */
1897 ZERO(0x02c); /* test control */
1898 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1899}
1900#undef ZERO
1901
1902#define ZERO(reg) writel(0, hc_mmio + (reg))
1903static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1904 unsigned int hc)
47c2b677 1905{
1906 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1907 u32 tmp;
1908
1909 ZERO(0x00c);
1910 ZERO(0x010);
1911 ZERO(0x014);
1912 ZERO(0x018);
1913
1914 tmp = readl(hc_mmio + 0x20);
1915 tmp &= 0x1c1c1c1c;
1916 tmp |= 0x03030303;
1917 writel(tmp, hc_mmio + 0x20);
1918}
1919#undef ZERO
1920
1921static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1922 unsigned int n_hc)
1923{
1924 unsigned int hc, port;
1925
1926 for (hc = 0; hc < n_hc; hc++) {
1927 for (port = 0; port < MV_PORTS_PER_HC; port++)
1928 mv5_reset_hc_port(hpriv, mmio,
1929 (hc * MV_PORTS_PER_HC) + port);
1930
1931 mv5_reset_one_hc(hpriv, mmio, hc);
1932 }
1933
1934 return 0;
1935}
1936
1937#undef ZERO
1938#define ZERO(reg) writel(0, mmio + (reg))
1939static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1940{
1941 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1942 struct mv_host_priv *hpriv = host->private_data;
1943 u32 tmp;
1944
1945 tmp = readl(mmio + MV_PCI_MODE);
1946 tmp &= 0xff00ffff;
1947 writel(tmp, mmio + MV_PCI_MODE);
1948
1949 ZERO(MV_PCI_DISC_TIMER);
1950 ZERO(MV_PCI_MSI_TRIGGER);
1951 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1952 ZERO(HC_MAIN_IRQ_MASK_OFS);
1953 ZERO(MV_PCI_SERR_MASK);
1954 ZERO(hpriv->irq_cause_ofs);
1955 ZERO(hpriv->irq_mask_ofs);
1956 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1957 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1958 ZERO(MV_PCI_ERR_ATTRIBUTE);
1959 ZERO(MV_PCI_ERR_COMMAND);
1960}
1961#undef ZERO
1962
1963static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1964{
1965 u32 tmp;
1966
1967 mv5_reset_flash(hpriv, mmio);
1968
1969 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1970 tmp &= 0x3;
1971 tmp |= (1 << 5) | (1 << 6);
1972 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1973}
1974
1975/**
1976 * mv6_reset_hc - Perform the 6xxx global soft reset
1977 * @mmio: base address of the HBA
1978 *
1979 * This routine only applies to 6xxx parts.
1980 *
1981 * LOCKING:
1982 * Inherited from caller.
1983 */
1984static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1985 unsigned int n_hc)
1986{
1987 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1988 int i, rc = 0;
1989 u32 t;
1990
1991 /* Following procedure defined in PCI "main command and status
1992 * register" table.
1993 */
1994 t = readl(reg);
1995 writel(t | STOP_PCI_MASTER, reg);
1996
1997 for (i = 0; i < 1000; i++) {
1998 udelay(1);
1999 t = readl(reg);
2dcb407e 2000 if (PCI_MASTER_EMPTY & t)
101ffae2 2001 break;
2002 }
2003 if (!(PCI_MASTER_EMPTY & t)) {
2004 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2005 rc = 1;
2006 goto done;
2007 }
2008
2009 /* set reset */
2010 i = 5;
2011 do {
2012 writel(t | GLOB_SFT_RST, reg);
2013 t = readl(reg);
2014 udelay(1);
2015 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2016
2017 if (!(GLOB_SFT_RST & t)) {
2018 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2019 rc = 1;
2020 goto done;
2021 }
2022
2023 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2024 i = 5;
2025 do {
2026 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2027 t = readl(reg);
2028 udelay(1);
2029 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2030
2031 if (GLOB_SFT_RST & t) {
2032 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2033 rc = 1;
2034 }
2035done:
2036 return rc;
2037}
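/*
 * Editor's sketch (not part of the driver): mv6_reset_hc() above
 * repeats one bounded set-then-poll shape three times (drain the PCI
 * master, assert reset, deassert reset).  Factored into a hypothetical
 * helper -- demo_poll_flag is an assumed name -- it looks like this:
 */
static int demo_poll_flag(void __iomem *reg, u32 flag, int want_set,
			  unsigned int tries)
{
	while (tries--) {
		u32 t = readl(reg);

		if (!!(t & flag) == !!want_set)
			return 0;	/* reached the wanted state */
		udelay(1);		/* brief, non-sleeping wait */
	}
	return -ETIMEDOUT;		/* flag never changed in time */
}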
2038
47c2b677 2039static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2040 void __iomem *mmio)
2041{
2042 void __iomem *port_mmio;
2043 u32 tmp;
2044
2045 tmp = readl(mmio + MV_RESET_CFG);
2046 if ((tmp & (1 << 0)) == 0) {
47c2b677 2047 hpriv->signal[idx].amps = 0x7 << 8;
2048 hpriv->signal[idx].pre = 0x1 << 5;
2049 return;
2050 }
2051
2052 port_mmio = mv_port_base(mmio, idx);
2053 tmp = readl(port_mmio + PHY_MODE2);
2054
2055 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2056 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2057}
2058
47c2b677 2059static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
ba3fe8fb 2060{
47c2b677 2061 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2062}
2063
c9d39130 2064static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2a47ce06 2065 unsigned int port)
bca1c4eb 2066{
2067 void __iomem *port_mmio = mv_port_base(mmio, port);
2068
bca1c4eb 2069 u32 hp_flags = hpriv->hp_flags;
2070 int fix_phy_mode2 =
2071 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
bca1c4eb 2072 int fix_phy_mode4 =
2073 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2074 u32 m2, tmp;
2075
2076 if (fix_phy_mode2) {
2077 m2 = readl(port_mmio + PHY_MODE2);
2078 m2 &= ~(1 << 16);
2079 m2 |= (1 << 31);
2080 writel(m2, port_mmio + PHY_MODE2);
2081
2082 udelay(200);
2083
2084 m2 = readl(port_mmio + PHY_MODE2);
2085 m2 &= ~((1 << 16) | (1 << 31));
2086 writel(m2, port_mmio + PHY_MODE2);
2087
2088 udelay(200);
2089 }
2090
2091 /* who knows what this magic does */
2092 tmp = readl(port_mmio + PHY_MODE3);
2093 tmp &= ~0x7F800000;
2094 tmp |= 0x2A800000;
2095 writel(tmp, port_mmio + PHY_MODE3);
2096
2097 if (fix_phy_mode4) {
47c2b677 2098 u32 m4;
2099
2100 m4 = readl(port_mmio + PHY_MODE4);
2101
2102 if (hp_flags & MV_HP_ERRATA_60X1B2)
2103 tmp = readl(port_mmio + 0x310);
2104
2105 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2106
2107 writel(m4, port_mmio + PHY_MODE4);
2108
2109 if (hp_flags & MV_HP_ERRATA_60X1B2)
2110 writel(tmp, port_mmio + 0x310);
2111 }
2112
2113 /* Revert values of pre-emphasis and signal amps to the saved ones */
2114 m2 = readl(port_mmio + PHY_MODE2);
2115
2116 m2 &= ~MV_M2_PREAMP_MASK;
2117 m2 |= hpriv->signal[port].amps;
2118 m2 |= hpriv->signal[port].pre;
47c2b677 2119 m2 &= ~(1 << 16);
bca1c4eb 2120
2121 /* according to mvSata 3.6.1, some IIE values are fixed */
2122 if (IS_GEN_IIE(hpriv)) {
2123 m2 &= ~0xC30FF01F;
2124 m2 |= 0x0000900F;
2125 }
2126
2127 writel(m2, port_mmio + PHY_MODE2);
2128}
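/*
 * Editor's sketch (not part of the driver): every PHY fix above is the
 * same read-modify-write shape -- read the register, clear the field
 * being replaced, OR in the new bits, write it back.  As a
 * hypothetical helper (demo_rmw32 is an assumed name):
 */
static inline void demo_rmw32(void __iomem *reg, u32 clear, u32 set)
{
	u32 v = readl(reg);

	v &= ~clear;	/* drop the old field contents */
	v |= set;	/* install the replacement bits */
	writel(v, reg);
}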
2129
2130static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2131 unsigned int port_no)
2132{
2133 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2134
2135 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2136
ee9ccdf7 2137 if (IS_GEN_II(hpriv)) {
c9d39130 2138 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2139 ifctl |= (1 << 7); /* enable gen2i speed */
2140 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2141 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2142 }
2143
2144 udelay(25); /* allow reset propagation */
2145
2146 /* Spec never mentions clearing the bit. Marvell's driver does
2147 * clear the bit, however.
2148 */
2149 writelfl(0, port_mmio + EDMA_CMD_OFS);
2150
2151 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2152
ee9ccdf7 2153 if (IS_GEN_I(hpriv))
2154 mdelay(1);
2155}
2156
05b308e1 2157/**
bdd4ddde 2158 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2159 * @ap: ATA channel to manipulate
2160 *
2161 * Part of this is taken from __sata_phy_reset and modified to
2162 * not sleep since this routine gets called from interrupt level.
2163 *
2164 * LOCKING:
2165 * Inherited from caller. This is coded to be safe to call at
2166 * interrupt level, i.e. it does not sleep.
31961943 2167 */
2168static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2169 unsigned long deadline)
20f733e7 2170{
095fec88 2171 struct mv_port_priv *pp = ap->private_data;
cca3974e 2172 struct mv_host_priv *hpriv = ap->host->private_data;
20f733e7 2173 void __iomem *port_mmio = mv_ap_base(ap);
2174 int retry = 5;
2175 u32 sstatus;
2176
2177 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2178
2179#ifdef DEBUG
2180 {
2181 u32 sstatus, serror, scontrol;
2182
2183 mv_scr_read(ap, SCR_STATUS, &sstatus);
2184 mv_scr_read(ap, SCR_ERROR, &serror);
2185 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2186 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2d79ab8f 2187 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2188 }
2189#endif
20f733e7 2190
2191 /* Issue COMRESET via SControl */
2192comreset_retry:
936fd732 2193 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
bdd4ddde 2194 msleep(1);
22374677 2195
936fd732 2196 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
bdd4ddde 2197 msleep(20);
22374677 2198
31961943 2199 do {
936fd732 2200 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
62f1d0e6 2201 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
31961943 2202 break;
22374677 2203
bdd4ddde 2204 msleep(1);
c5d3e45a 2205 } while (time_before(jiffies, deadline));
20f733e7 2206
22374677 2207 /* work around errata */
ee9ccdf7 2208 if (IS_GEN_II(hpriv) &&
2209 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2210 (retry-- > 0))
2211 goto comreset_retry;
095fec88 2212
2213#ifdef DEBUG
2214 {
2215 u32 sstatus, serror, scontrol;
2216
2217 mv_scr_read(ap, SCR_STATUS, &sstatus);
2218 mv_scr_read(ap, SCR_ERROR, &serror);
2219 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2220 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2221 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2222 }
2223#endif
31961943 2224
936fd732 2225 if (ata_link_offline(&ap->link)) {
bdd4ddde 2226 *class = ATA_DEV_NONE;
2227 return;
2228 }
2229
2230	/* even after SStatus reflects that the device is ready,
2231	 * it seems to take a while for the link to be fully
2232	 * established (and thus Status no longer 0x80/0x7F),
2233	 * so we poll a bit for that here.
2234 */
2235 retry = 20;
2236 while (1) {
2237 u8 drv_stat = ata_check_status(ap);
2238 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2239 break;
bdd4ddde 2240 msleep(500);
2241 if (retry-- <= 0)
2242 break;
2243 if (time_after(jiffies, deadline))
2244 break;
2245 }
2246
2247 /* FIXME: if we passed the deadline, the following
2248 * code probably produces an invalid result
2249 */
20f733e7 2250
bdd4ddde 2251 /* finally, read device signature from TF registers */
3f19859e 2252 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2253
2254 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2255
bdd4ddde 2256 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
095fec88 2257
bca1c4eb 2258 VPRINTK("EXIT\n");
2259}
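/*
 * Editor's sketch (not part of the driver): the (sstatus & 0x3) tests
 * above read the DET field of SStatus -- 3 means the PHY is up and
 * communicating, 0 means no device.  The Gen-II errata values checked
 * (0x113, 0x123) add the SPD (bits 7:4) and IPM (bits 11:8) fields.
 * A decoding sketch, with demo_sstatus_link_up an assumed name:
 */
static int demo_sstatus_link_up(u32 sstatus)
{
	u32 det = sstatus & 0xf;		/* detection state */
	u32 spd = (sstatus >> 4) & 0xf;		/* negotiated speed */

	return det == 3 && spd != 0;	/* PHY established at some speed */
}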
2260
cc0680a5 2261static int mv_prereset(struct ata_link *link, unsigned long deadline)
22374677 2262{
cc0680a5 2263 struct ata_port *ap = link->ap;
bdd4ddde 2264 struct mv_port_priv *pp = ap->private_data;
cc0680a5 2265 struct ata_eh_context *ehc = &link->eh_context;
bdd4ddde 2266 int rc;
0ea9e179 2267
2268 rc = mv_stop_dma(ap);
2269 if (rc)
2270 ehc->i.action |= ATA_EH_HARDRESET;
2271
2272 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2273 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2274 ehc->i.action |= ATA_EH_HARDRESET;
2275 }
2276
2277 /* if we're about to do hardreset, nothing more to do */
2278 if (ehc->i.action & ATA_EH_HARDRESET)
2279 return 0;
2280
cc0680a5 2281 if (ata_link_online(link))
2282 rc = ata_wait_ready(ap, deadline);
2283 else
2284 rc = -ENODEV;
2285
2286 return rc;
2287}
2288
cc0680a5 2289static int mv_hardreset(struct ata_link *link, unsigned int *class,
bdd4ddde 2290 unsigned long deadline)
31961943 2291{
cc0680a5 2292 struct ata_port *ap = link->ap;
bdd4ddde 2293 struct mv_host_priv *hpriv = ap->host->private_data;
0d5ff566 2294 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
31961943 2295
bdd4ddde 2296 mv_stop_dma(ap);
31961943 2297
bdd4ddde 2298 mv_channel_reset(hpriv, mmio, ap->port_no);
31961943 2299
2300 mv_phy_reset(ap, class, deadline);
2301
2302 return 0;
2303}
2304
cc0680a5 2305static void mv_postreset(struct ata_link *link, unsigned int *classes)
bdd4ddde 2306{
cc0680a5 2307 struct ata_port *ap = link->ap;
2308 u32 serr;
2309
2310 /* print link status */
cc0680a5 2311 sata_print_link_status(link);
31961943 2312
bdd4ddde 2313 /* clear SError */
2314 sata_scr_read(link, SCR_ERROR, &serr);
2315 sata_scr_write_flush(link, SCR_ERROR, serr);
2316
2317 /* bail out if no device is present */
2318 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2319 DPRINTK("EXIT, no device\n");
2320 return;
9b358e30 2321 }
2322
2323 /* set up device control */
2324 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2325}
2326
2327static void mv_error_handler(struct ata_port *ap)
2328{
2329 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2330 mv_hardreset, mv_postreset);
2331}
2332
2333static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2334{
2335 mv_stop_dma(qc->ap);
2336}
2337
2338static void mv_eh_freeze(struct ata_port *ap)
2339{
2340 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2341 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2342 u32 tmp, mask;
2343 unsigned int shift;
2344
2345 /* FIXME: handle coalescing completion events properly */
2346
2347 shift = ap->port_no * 2;
2348 if (hc > 0)
2349 shift++;
2350
2351 mask = 0x3 << shift;
2352
2353 /* disable assertion of portN err, done events */
2354 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2355 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2356}
2357
2358static void mv_eh_thaw(struct ata_port *ap)
2359{
2360 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2361 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2362 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2363 void __iomem *port_mmio = mv_ap_base(ap);
2364 u32 tmp, mask, hc_irq_cause;
2365 unsigned int shift, hc_port_no = ap->port_no;
2366
2367 /* FIXME: handle coalescing completion events properly */
2368
2369 shift = ap->port_no * 2;
2370 if (hc > 0) {
2371 shift++;
2372 hc_port_no -= 4;
2373 }
2374
2375 mask = 0x3 << shift;
2376
2377 /* clear EDMA errors on this port */
2378 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2379
2380 /* clear pending irq events */
2381 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2382 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2383 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2384 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2385
2386 /* enable assertion of portN err, done events */
2387 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2388 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2389}
2390
2391/**
2392 * mv_port_init - Perform some early initialization on a single port.
2393 * @port: libata data structure storing shadow register addresses
2394 * @port_mmio: base address of the port
2395 *
2396 * Initialize shadow register mmio addresses, clear outstanding
2397 * interrupts on the port, and unmask interrupts for the future
2398 * start of the port.
2399 *
2400 * LOCKING:
2401 * Inherited from caller.
2402 */
31961943 2403static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
20f733e7 2404{
0d5ff566 2405 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2406 unsigned serr_ofs;
2407
8b260248 2408 /* PIO related setup
2409 */
2410 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
8b260248 2411 port->error_addr =
2412 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2413 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2414 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2415 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2416 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2417 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
8b260248 2418 port->status_addr =
2419 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2420 /* special case: control/altstatus doesn't have ATA_REG_ address */
2421 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2422
2423 /* unused: */
8d9db2d2 2424 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
20f733e7 2425
2426 /* Clear any currently outstanding port interrupt conditions */
2427 serr_ofs = mv_scr_offset(SCR_ERROR);
2428 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2429 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2430
20f733e7 2431 /* unmask all EDMA error interrupts */
31961943 2432 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
20f733e7 2433
8b260248 2434 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2435 readl(port_mmio + EDMA_CFG_OFS),
2436 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2437 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2438}
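/*
 * Editor's sketch (not part of the driver): the shadow register block
 * exposes the classic ATA taskfile as u32-spaced slots, so register N
 * sits at shd_base + 4 * N -- exactly what the assignments above
 * expand to.  A hypothetical address calculator (demo_shadow_reg is an
 * assumed name):
 */
static inline void __iomem *demo_shadow_reg(void __iomem *shd_base,
					    unsigned int reg)
{
	/* ATA_REG_DATA=0, ERR=1, NSECT=2, LBAL=3, LBAM=4, LBAH=5,
	 * DEVICE=6, STATUS=7 -- the standard libata register indices
	 */
	return shd_base + sizeof(u32) * reg;
}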
2439
4447d351 2440static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
bca1c4eb 2441{
2442 struct pci_dev *pdev = to_pci_dev(host->dev);
2443 struct mv_host_priv *hpriv = host->private_data;
2444 u32 hp_flags = hpriv->hp_flags;
2445
5796d1c4 2446 switch (board_idx) {
2447 case chip_5080:
2448 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2449 hp_flags |= MV_HP_GEN_I;
47c2b677 2450
44c10138 2451 switch (pdev->revision) {
2452 case 0x1:
2453 hp_flags |= MV_HP_ERRATA_50XXB0;
2454 break;
2455 case 0x3:
2456 hp_flags |= MV_HP_ERRATA_50XXB2;
2457 break;
2458 default:
2459 dev_printk(KERN_WARNING, &pdev->dev,
2460 "Applying 50XXB2 workarounds to unknown rev\n");
2461 hp_flags |= MV_HP_ERRATA_50XXB2;
2462 break;
2463 }
2464 break;
2465
2466 case chip_504x:
2467 case chip_508x:
47c2b677 2468 hpriv->ops = &mv5xxx_ops;
ee9ccdf7 2469 hp_flags |= MV_HP_GEN_I;
bca1c4eb 2470
44c10138 2471 switch (pdev->revision) {
2472 case 0x0:
2473 hp_flags |= MV_HP_ERRATA_50XXB0;
2474 break;
2475 case 0x3:
2476 hp_flags |= MV_HP_ERRATA_50XXB2;
2477 break;
2478 default:
2479 dev_printk(KERN_WARNING, &pdev->dev,
2480 "Applying B2 workarounds to unknown rev\n");
2481 hp_flags |= MV_HP_ERRATA_50XXB2;
2482 break;
2483 }
2484 break;
2485
2486 case chip_604x:
2487 case chip_608x:
47c2b677 2488 hpriv->ops = &mv6xxx_ops;
ee9ccdf7 2489 hp_flags |= MV_HP_GEN_II;
47c2b677 2490
44c10138 2491 switch (pdev->revision) {
2492 case 0x7:
2493 hp_flags |= MV_HP_ERRATA_60X1B2;
2494 break;
2495 case 0x9:
2496 hp_flags |= MV_HP_ERRATA_60X1C0;
2497 break;
2498 default:
2499 dev_printk(KERN_WARNING, &pdev->dev,
2500 "Applying B2 workarounds to unknown rev\n");
2501 hp_flags |= MV_HP_ERRATA_60X1B2;
2502 break;
2503 }
2504 break;
2505
e4e7b892 2506 case chip_7042:
02a121da 2507 hp_flags |= MV_HP_PCIE;
2508 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2509 (pdev->device == 0x2300 || pdev->device == 0x2310))
2510 {
2511 /*
2512 * Highpoint RocketRAID PCIe 23xx series cards:
2513 *
2514 * Unconfigured drives are treated as "Legacy"
2515 * by the BIOS, and it overwrites sector 8 with
2516 * a "Lgcy" metadata block prior to Linux boot.
2517 *
2518 * Configured drives (RAID or JBOD) leave sector 8
2519 * alone, but instead overwrite a high-numbered
2520 * sector for the RAID metadata.  This sector can
2521 * be determined exactly by truncating the physical
2522 * drive capacity to a nice even GB value.
2523 *
2524 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2525 *
2526 * Warn the user, lest they think we're just buggy.
2527 */
2528 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2529 " BIOS CORRUPTS DATA on all attached drives,"
2530 " regardless of if/how they are configured."
2531 " BEWARE!\n");
2532 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2533 " use sectors 8-9 on \"Legacy\" drives,"
2534 " and avoid the final two gigabytes on"
2535 " all RocketRAID BIOS initialized drives.\n");
306b30f7 2536 }
2537 case chip_6042:
2538 hpriv->ops = &mv6xxx_ops;
2539 hp_flags |= MV_HP_GEN_IIE;
2540
44c10138 2541 switch (pdev->revision) {
2542 case 0x0:
2543 hp_flags |= MV_HP_ERRATA_XX42A0;
2544 break;
2545 case 0x1:
2546 hp_flags |= MV_HP_ERRATA_60X1C0;
2547 break;
2548 default:
2549 dev_printk(KERN_WARNING, &pdev->dev,
2550 "Applying 60X1C0 workarounds to unknown rev\n");
2551 hp_flags |= MV_HP_ERRATA_60X1C0;
2552 break;
2553 }
2554 break;
2555
bca1c4eb 2556 default:
2557 dev_printk(KERN_ERR, &pdev->dev,
2558 "BUG: invalid board index %u\n", board_idx);
2559 return 1;
2560 }
2561
2562 hpriv->hp_flags = hp_flags;
2563 if (hp_flags & MV_HP_PCIE) {
2564 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2565 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2566 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2567 } else {
2568 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2569 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2570 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2571 }
2572
2573 return 0;
2574}
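/*
 * Editor's note with a sketch (not part of the driver): mv_chip_id()
 * above is essentially a (board, pdev->revision) -> errata-flag table
 * with a per-board fallback.  Expressed as data rather than nested
 * switches -- demo_* names are assumptions -- the 604x/608x entry
 * would look like:
 */
struct demo_errata_map {
	u8 revision;
	u32 hp_flag;	/* MV_HP_ERRATA_* value to apply */
};

static const struct demo_errata_map demo_604x_errata[] = {
	{ 0x7, MV_HP_ERRATA_60X1B2 },
	{ 0x9, MV_HP_ERRATA_60X1C0 },
	/* any other revision falls back to the 60X1B2 workarounds */
};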
2575
05b308e1 2576/**
47c2b677 2577 * mv_init_host - Perform some early initialization of the host.
2578 * @host: ATA host to initialize
2579 * @board_idx: controller index
2580 *
2581 * If possible, do an early global reset of the host. Then do
2582 * our port init and clear/unmask all/relevant host interrupts.
2583 *
2584 * LOCKING:
2585 * Inherited from caller.
2586 */
4447d351 2587static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2588{
2589 int rc = 0, n_hc, port, hc;
2590 struct pci_dev *pdev = to_pci_dev(host->dev);
2591 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2592 struct mv_host_priv *hpriv = host->private_data;
bca1c4eb 2593
2594 /* global interrupt mask */
2595 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2596
4447d351 2597 rc = mv_chip_id(host, board_idx);
2598 if (rc)
2599 goto done;
2600
4447d351 2601 n_hc = mv_get_hc_count(host->ports[0]->flags);
bca1c4eb 2602
4447d351 2603 for (port = 0; port < host->n_ports; port++)
47c2b677 2604 hpriv->ops->read_preamp(hpriv, port, mmio);
20f733e7 2605
c9d39130 2606 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
47c2b677 2607 if (rc)
20f733e7 2608 goto done;
20f733e7 2609
2610 hpriv->ops->reset_flash(hpriv, mmio);
2611 hpriv->ops->reset_bus(pdev, mmio);
47c2b677 2612 hpriv->ops->enable_leds(hpriv, mmio);
20f733e7 2613
4447d351 2614 for (port = 0; port < host->n_ports; port++) {
ee9ccdf7 2615 if (IS_GEN_II(hpriv)) {
2616 void __iomem *port_mmio = mv_port_base(mmio, port);
2617
2a47ce06 2618 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2619 ifctl |= (1 << 7); /* enable gen2i speed */
2620 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2621 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2622 }
2623
c9d39130 2624 hpriv->ops->phy_errata(hpriv, mmio, port);
2625 }
2626
4447d351 2627 for (port = 0; port < host->n_ports; port++) {
cbcdd875 2628 struct ata_port *ap = host->ports[port];
2a47ce06 2629 void __iomem *port_mmio = mv_port_base(mmio, port);
2630 unsigned int offset = port_mmio - mmio;
2631
2632 mv_port_init(&ap->ioaddr, port_mmio);
2633
2634 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2635 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2636 }
2637
2638 for (hc = 0; hc < n_hc; hc++) {
2639 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2640
2641 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2642 "(before clear)=0x%08x\n", hc,
2643 readl(hc_mmio + HC_CFG_OFS),
2644 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2645
2646 /* Clear any currently outstanding hc interrupt conditions */
2647 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2648 }
2649
31961943 2650 /* Clear any currently outstanding host interrupt conditions */
02a121da 2651 writelfl(0, mmio + hpriv->irq_cause_ofs);
2652
2653 /* and unmask interrupt generation for host regs */
02a121da 2654 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
fb621e2f 2655
ee9ccdf7 2656 if (IS_GEN_I(hpriv))
2657 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2658 else
2659 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2660
2661 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
8b260248 2662 "PCI int cause/mask=0x%08x/0x%08x\n",
2663 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2664 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2665 readl(mmio + hpriv->irq_cause_ofs),
2666 readl(mmio + hpriv->irq_mask_ofs));
bca1c4eb 2667
31961943 2668done:
2669 return rc;
2670}
2671
2672/**
2673 * mv_print_info - Dump key info to kernel log for perusal.
4447d351 2674 * @host: ATA host to print info about
2675 *
2676 * FIXME: complete this.
2677 *
2678 * LOCKING:
2679 * Inherited from caller.
2680 */
4447d351 2681static void mv_print_info(struct ata_host *host)
31961943 2682{
2683 struct pci_dev *pdev = to_pci_dev(host->dev);
2684 struct mv_host_priv *hpriv = host->private_data;
44c10138 2685 u8 scc;
c1e4fe71 2686 const char *scc_s, *gen;
2687
2688 /* Use this to determine the HW stepping of the chip so we know
2689 * what errata to work around
2690 */
2691 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2692 if (scc == 0)
2693 scc_s = "SCSI";
2694 else if (scc == 0x01)
2695 scc_s = "RAID";
2696 else
2697 scc_s = "?";
2698
2699 if (IS_GEN_I(hpriv))
2700 gen = "I";
2701 else if (IS_GEN_II(hpriv))
2702 gen = "II";
2703 else if (IS_GEN_IIE(hpriv))
2704 gen = "IIE";
2705 else
2706 gen = "?";
31961943 2707
a9524a76 2708 dev_printk(KERN_INFO, &pdev->dev,
c1e4fe71
JG
2709 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2710 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2711 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2712}
2713
2714/**
2715 * mv_init_one - handle a positive probe of a Marvell host
2716 * @pdev: PCI device found
2717 * @ent: PCI device ID entry for the matched host
2718 *
2719 * LOCKING:
2720 * Inherited from caller.
2721 */
2722static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2723{
2dcb407e 2724 static int printed_version;
20f733e7 2725 unsigned int board_idx = (unsigned int)ent->driver_data;
2726 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2727 struct ata_host *host;
2728 struct mv_host_priv *hpriv;
2729 int n_ports, rc;
20f733e7 2730
2731 if (!printed_version++)
2732 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
20f733e7 2733
2734 /* allocate host */
2735 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2736
2737 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2738 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2739 if (!host || !hpriv)
2740 return -ENOMEM;
2741 host->private_data = hpriv;
2742
2743 /* acquire resources */
2744 rc = pcim_enable_device(pdev);
2745 if (rc)
20f733e7 2746 return rc;
20f733e7 2747
2748 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2749 if (rc == -EBUSY)
24dc5f33 2750 pcim_pin_device(pdev);
0d5ff566 2751 if (rc)
24dc5f33 2752 return rc;
4447d351 2753 host->iomap = pcim_iomap_table(pdev);
20f733e7 2754
2755 rc = pci_go_64(pdev);
2756 if (rc)
2757 return rc;
2758
20f733e7 2759 /* initialize adapter */
4447d351 2760 rc = mv_init_host(host, board_idx);
2761 if (rc)
2762 return rc;
20f733e7 2763
31961943 2764 /* Enable interrupts */
6a59dcf8 2765 if (msi && pci_enable_msi(pdev))
31961943 2766 pci_intx(pdev, 1);
20f733e7 2767
31961943 2768 mv_dump_pci_cfg(pdev, 0x68);
4447d351 2769 mv_print_info(host);
20f733e7 2770
4447d351 2771 pci_set_master(pdev);
ea8b4db9 2772 pci_try_set_mwi(pdev);
4447d351 2773 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
c5d3e45a 2774 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2775}
2776
2777static int __init mv_init(void)
2778{
b7887196 2779 return pci_register_driver(&mv_pci_driver);
2780}
2781
2782static void __exit mv_exit(void)
2783{
2784 pci_unregister_driver(&mv_pci_driver);
2785}
2786
2787MODULE_AUTHOR("Brett Russ");
2788MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2789MODULE_LICENSE("GPL");
2790MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2791MODULE_VERSION(DRV_VERSION);
2792
2793module_param(msi, int, 0444);
2794MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2795
2796module_init(mv_init);
2797module_exit(mv_exit);