1 /*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
32 2) Improve/fix IRQ and error handling sequences.
33
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
39
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41
42 6) Add port multiplier support (intermediate)
43
44 8) Develop a low-power-consumption strategy, and implement it.
45
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
48 like that.
49
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is not
53 worth the latency cost.
54
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
58
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
61
62 */
63
64
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
81
82 #define DRV_NAME "sata_mv"
83 #define DRV_VERSION "1.20"
84
85 enum {
86 /* BARs are enumerated in pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
93
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101
102 MV_SATAHC0_REG_BASE = 0x20000,
103 MV_FLASH_CTL = 0x1046c,
104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
106
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111
112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
114
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
121 MV_MAX_SG_CT = 256,
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
123
124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
128 MV_PORT_MASK = 3,
129
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
135
136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
140
141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
148
149 CRPB_FLAG_STATUS_SHIFT = 8,
150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
152
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
154
155 /* PCI interface registers */
156
157 PCI_COMMAND_OFS = 0xc00,
158
159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
163
164 MV_PCI_MODE = 0xd00,
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
174
175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
178
179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
182
183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
191 PCI_ERR = (1 << 18),
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 HC_MAIN_RSVD),
206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 HC_MAIN_RSVD_5),
208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
209
210 /* SATAHC registers */
211 HC_CFG_OFS = 0,
212
213 HC_IRQ_CAUSE_OFS = 0x14,
214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
217
218 /* Shadow block registers */
219 SHD_BLK_OFS = 0x100,
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
221
222 /* SATA registers */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
226 PHY_MODE3 = 0x310,
227 PHY_MODE4 = 0x314,
228 PHY_MODE2 = 0x330,
229 MV5_PHY_MODE = 0x74,
230 MV5_LT_MODE = 0x30,
231 MV5_PHY_CTL = 0x0C,
232 SATA_INTERFACE_CTL = 0x050,
233
234 MV_M2_PREAMP_MASK = 0x7e0,
235
236 /* Port registers */
237 EDMA_CFG_OFS = 0,
238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
243
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
260
261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
266
267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
268
269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
275
276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
277
278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
281
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
286
287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_SERR |
292 EDMA_ERR_SELF_DIS |
293 EDMA_ERR_CRQB_PAR |
294 EDMA_ERR_CRPB_PAR |
295 EDMA_ERR_INTRL_PAR |
296 EDMA_ERR_IORDY |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_OVERRUN_5 |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
308 EDMA_ERR_CRQB_PAR |
309 EDMA_ERR_CRPB_PAR |
310 EDMA_ERR_INTRL_PAR |
311 EDMA_ERR_IORDY,
312
313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
315
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
318
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
322 EDMA_RSP_Q_PTR_SHIFT = 3,
323
324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
328
329 EDMA_IORDY_TMOUT = 0x34,
330 EDMA_ARB_CFG = 0x38,
331
332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
338 MV_HP_ERRATA_XX42A0 = (1 << 5),
339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
343
344 /* Port private flags (pp_flags) */
345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
348 };
349
350 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
353 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
354
355 enum {
356 /* DMA boundary 0xffff is required by the s/g splitting
357 * we need on /length/ in mv_fill-sg().
358 */
359 MV_DMA_BOUNDARY = 0xffffU,
360
361 /* mask of register bits containing lower 32 bits
362 * of EDMA request queue DMA address
363 */
364 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
365
366 /* ditto, for response queue */
367 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
368 };
369
370 enum chip_type {
371 chip_504x,
372 chip_508x,
373 chip_5080,
374 chip_604x,
375 chip_608x,
376 chip_6042,
377 chip_7042,
378 chip_soc,
379 };
380
381 /* Command ReQuest Block: 32B */
382 struct mv_crqb {
383 __le32 sg_addr;
384 __le32 sg_addr_hi;
385 __le16 ctrl_flags;
386 __le16 ata_cmd[11];
387 };
388
389 struct mv_crqb_iie {
390 __le32 addr;
391 __le32 addr_hi;
392 __le32 flags;
393 __le32 len;
394 __le32 ata_cmd[4];
395 };
396
397 /* Command ResPonse Block: 8B */
398 struct mv_crpb {
399 __le16 id;
400 __le16 flags;
401 __le32 tmstmp;
402 };
403
404 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405 struct mv_sg {
406 __le32 addr;
407 __le32 flags_size;
408 __le32 addr_hi;
409 __le32 reserved;
410 };
411
412 struct mv_port_priv {
413 struct mv_crqb *crqb;
414 dma_addr_t crqb_dma;
415 struct mv_crpb *crpb;
416 dma_addr_t crpb_dma;
417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
419
420 unsigned int req_idx;
421 unsigned int resp_idx;
422
423 u32 pp_flags;
424 };
425
426 struct mv_port_signal {
427 u32 amps;
428 u32 pre;
429 };
430
431 struct mv_host_priv {
432 u32 hp_flags;
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
435 int n_ports;
436 void __iomem *base;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
439 u32 irq_cause_ofs;
440 u32 irq_mask_ofs;
441 u32 unmask_all_irqs;
442 /*
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
446 */
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
450 };
451
452 struct mv_hw_ops {
453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
462 };
463
464 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
465 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
466 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
467 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
468 static int mv_port_start(struct ata_port *ap);
469 static void mv_port_stop(struct ata_port *ap);
470 static void mv_qc_prep(struct ata_queued_cmd *qc);
471 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
472 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
473 static void mv_error_handler(struct ata_port *ap);
474 static void mv_eh_freeze(struct ata_port *ap);
475 static void mv_eh_thaw(struct ata_port *ap);
476 static void mv6_dev_config(struct ata_device *dev);
477
478 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
479 unsigned int port);
480 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
481 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
482 void __iomem *mmio);
483 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
484 unsigned int n_hc);
485 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
486 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
487
488 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
489 unsigned int port);
490 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
491 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
492 void __iomem *mmio);
493 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
494 unsigned int n_hc);
495 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
496 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
497 void __iomem *mmio);
498 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
499 void __iomem *mmio);
500 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
501 void __iomem *mmio, unsigned int n_hc);
502 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
503 void __iomem *mmio);
504 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
505 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
506 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
507 unsigned int port_no);
508 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
509 void __iomem *port_mmio, int want_ncq);
510 static int __mv_stop_dma(struct ata_port *ap);
511
512 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
513 * because we have to allow room for worst case splitting of
514 * PRDs for 64K boundaries in mv_fill_sg().
515 */
516 static struct scsi_host_template mv5_sht = {
517 .module = THIS_MODULE,
518 .name = DRV_NAME,
519 .ioctl = ata_scsi_ioctl,
520 .queuecommand = ata_scsi_queuecmd,
521 .can_queue = ATA_DEF_QUEUE,
522 .this_id = ATA_SHT_THIS_ID,
523 .sg_tablesize = MV_MAX_SG_CT / 2,
524 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
525 .emulated = ATA_SHT_EMULATED,
526 .use_clustering = 1,
527 .proc_name = DRV_NAME,
528 .dma_boundary = MV_DMA_BOUNDARY,
529 .slave_configure = ata_scsi_slave_config,
530 .slave_destroy = ata_scsi_slave_destroy,
531 .bios_param = ata_std_bios_param,
532 };
533
534 static struct scsi_host_template mv6_sht = {
535 .module = THIS_MODULE,
536 .name = DRV_NAME,
537 .ioctl = ata_scsi_ioctl,
538 .queuecommand = ata_scsi_queuecmd,
539 .change_queue_depth = ata_scsi_change_queue_depth,
540 .can_queue = MV_MAX_Q_DEPTH - 1,
541 .this_id = ATA_SHT_THIS_ID,
542 .sg_tablesize = MV_MAX_SG_CT / 2,
543 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
544 .emulated = ATA_SHT_EMULATED,
545 .use_clustering = 1,
546 .proc_name = DRV_NAME,
547 .dma_boundary = MV_DMA_BOUNDARY,
548 .slave_configure = ata_scsi_slave_config,
549 .slave_destroy = ata_scsi_slave_destroy,
550 .bios_param = ata_std_bios_param,
551 };
552
553 static const struct ata_port_operations mv5_ops = {
554 .tf_load = ata_tf_load,
555 .tf_read = ata_tf_read,
556 .check_status = ata_check_status,
557 .exec_command = ata_exec_command,
558 .dev_select = ata_std_dev_select,
559
560 .cable_detect = ata_cable_sata,
561
562 .qc_prep = mv_qc_prep,
563 .qc_issue = mv_qc_issue,
564 .data_xfer = ata_data_xfer,
565
566 .irq_clear = ata_noop_irq_clear,
567 .irq_on = ata_irq_on,
568
569 .error_handler = mv_error_handler,
570 .freeze = mv_eh_freeze,
571 .thaw = mv_eh_thaw,
572
573 .scr_read = mv5_scr_read,
574 .scr_write = mv5_scr_write,
575
576 .port_start = mv_port_start,
577 .port_stop = mv_port_stop,
578 };
579
580 static const struct ata_port_operations mv6_ops = {
581 .dev_config = mv6_dev_config,
582 .tf_load = ata_tf_load,
583 .tf_read = ata_tf_read,
584 .check_status = ata_check_status,
585 .exec_command = ata_exec_command,
586 .dev_select = ata_std_dev_select,
587
588 .cable_detect = ata_cable_sata,
589
590 .qc_prep = mv_qc_prep,
591 .qc_issue = mv_qc_issue,
592 .data_xfer = ata_data_xfer,
593
594 .irq_clear = ata_noop_irq_clear,
595 .irq_on = ata_irq_on,
596
597 .error_handler = mv_error_handler,
598 .freeze = mv_eh_freeze,
599 .thaw = mv_eh_thaw,
600 .qc_defer = ata_std_qc_defer,
601
602 .scr_read = mv_scr_read,
603 .scr_write = mv_scr_write,
604
605 .port_start = mv_port_start,
606 .port_stop = mv_port_stop,
607 };
608
609 static const struct ata_port_operations mv_iie_ops = {
610 .tf_load = ata_tf_load,
611 .tf_read = ata_tf_read,
612 .check_status = ata_check_status,
613 .exec_command = ata_exec_command,
614 .dev_select = ata_std_dev_select,
615
616 .cable_detect = ata_cable_sata,
617
618 .qc_prep = mv_qc_prep_iie,
619 .qc_issue = mv_qc_issue,
620 .data_xfer = ata_data_xfer,
621
622 .irq_clear = ata_noop_irq_clear,
623 .irq_on = ata_irq_on,
624
625 .error_handler = mv_error_handler,
626 .freeze = mv_eh_freeze,
627 .thaw = mv_eh_thaw,
628 .qc_defer = ata_std_qc_defer,
629
630 .scr_read = mv_scr_read,
631 .scr_write = mv_scr_write,
632
633 .port_start = mv_port_start,
634 .port_stop = mv_port_stop,
635 };
636
637 static const struct ata_port_info mv_port_info[] = {
638 { /* chip_504x */
639 .flags = MV_COMMON_FLAGS,
640 .pio_mask = 0x1f, /* pio0-4 */
641 .udma_mask = ATA_UDMA6,
642 .port_ops = &mv5_ops,
643 },
644 { /* chip_508x */
645 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
646 .pio_mask = 0x1f, /* pio0-4 */
647 .udma_mask = ATA_UDMA6,
648 .port_ops = &mv5_ops,
649 },
650 { /* chip_5080 */
651 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
652 .pio_mask = 0x1f, /* pio0-4 */
653 .udma_mask = ATA_UDMA6,
654 .port_ops = &mv5_ops,
655 },
656 { /* chip_604x */
657 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
658 ATA_FLAG_NCQ,
659 .pio_mask = 0x1f, /* pio0-4 */
660 .udma_mask = ATA_UDMA6,
661 .port_ops = &mv6_ops,
662 },
663 { /* chip_608x */
664 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
665 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
666 .pio_mask = 0x1f, /* pio0-4 */
667 .udma_mask = ATA_UDMA6,
668 .port_ops = &mv6_ops,
669 },
670 { /* chip_6042 */
671 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
672 ATA_FLAG_NCQ,
673 .pio_mask = 0x1f, /* pio0-4 */
674 .udma_mask = ATA_UDMA6,
675 .port_ops = &mv_iie_ops,
676 },
677 { /* chip_7042 */
678 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
679 ATA_FLAG_NCQ,
680 .pio_mask = 0x1f, /* pio0-4 */
681 .udma_mask = ATA_UDMA6,
682 .port_ops = &mv_iie_ops,
683 },
684 { /* chip_soc */
685 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
686 .pio_mask = 0x1f, /* pio0-4 */
687 .udma_mask = ATA_UDMA6,
688 .port_ops = &mv_iie_ops,
689 },
690 };
691
692 static const struct pci_device_id mv_pci_tbl[] = {
693 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
694 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
695 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
696 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
697 /* RocketRAID 1740/174x have different identifiers */
698 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
699 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
700
701 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
702 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
703 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
704 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
705 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
706
707 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
708
709 /* Adaptec 1430SA */
710 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
711
712 /* Marvell 7042 support */
713 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
714
715 /* Highpoint RocketRAID PCIe series */
716 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
717 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
718
719 { } /* terminate list */
720 };
721
722 static const struct mv_hw_ops mv5xxx_ops = {
723 .phy_errata = mv5_phy_errata,
724 .enable_leds = mv5_enable_leds,
725 .read_preamp = mv5_read_preamp,
726 .reset_hc = mv5_reset_hc,
727 .reset_flash = mv5_reset_flash,
728 .reset_bus = mv5_reset_bus,
729 };
730
731 static const struct mv_hw_ops mv6xxx_ops = {
732 .phy_errata = mv6_phy_errata,
733 .enable_leds = mv6_enable_leds,
734 .read_preamp = mv6_read_preamp,
735 .reset_hc = mv6_reset_hc,
736 .reset_flash = mv6_reset_flash,
737 .reset_bus = mv_reset_pci_bus,
738 };
739
740 static const struct mv_hw_ops mv_soc_ops = {
741 .phy_errata = mv6_phy_errata,
742 .enable_leds = mv_soc_enable_leds,
743 .read_preamp = mv_soc_read_preamp,
744 .reset_hc = mv_soc_reset_hc,
745 .reset_flash = mv_soc_reset_flash,
746 .reset_bus = mv_soc_reset_bus,
747 };
748
749 /*
750 * Functions
751 */
752
753 static inline void writelfl(unsigned long data, void __iomem *addr)
754 {
755 writel(data, addr);
756 (void) readl(addr); /* flush to avoid PCI posted write */
757 }
758
759 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
760 {
761 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
762 }
763
764 static inline unsigned int mv_hc_from_port(unsigned int port)
765 {
766 return port >> MV_PORT_HC_SHIFT;
767 }
768
769 static inline unsigned int mv_hardport_from_port(unsigned int port)
770 {
771 return port & MV_PORT_MASK;
772 }
773
774 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
775 unsigned int port)
776 {
777 return mv_hc_base(base, mv_hc_from_port(port));
778 }
779
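/* Worked example of the port register mapping implemented below: global port 5
 * is HC (5 >> MV_PORT_HC_SHIFT) = 1, hard port (5 & MV_PORT_MASK) = 1, so its
 * registers live at base + 0x20000 + 1*0x10000 + 0x2000 + 1*0x2000 = base + 0x34000.
 */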
780 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
781 {
782 return mv_hc_base_from_port(base, port) +
783 MV_SATAHC_ARBTR_REG_SZ +
784 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
785 }
786
787 static inline void __iomem *mv_host_base(struct ata_host *host)
788 {
789 struct mv_host_priv *hpriv = host->private_data;
790 return hpriv->base;
791 }
792
793 static inline void __iomem *mv_ap_base(struct ata_port *ap)
794 {
795 return mv_port_base(mv_host_base(ap->host), ap->port_no);
796 }
797
798 static inline int mv_get_hc_count(unsigned long port_flags)
799 {
800 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
801 }
802
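/* Program the EDMA queue registers.  The request-queue IN pointer and the
 * response-queue OUT pointer registers also carry the low bits of the queue
 * base address (see EDMA_*_Q_*_PTR_OFS above), while the current entry index
 * occupies a small field at EDMA_REQ_Q_PTR_SHIFT / EDMA_RSP_Q_PTR_SHIFT.
 * The upper 32 bits of each 64-bit DMA address go into the BASE_HI registers.
 */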
803 static void mv_set_edma_ptrs(void __iomem *port_mmio,
804 struct mv_host_priv *hpriv,
805 struct mv_port_priv *pp)
806 {
807 u32 index;
808
809 /*
810 * initialize request queue
811 */
812 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
813
814 WARN_ON(pp->crqb_dma & 0x3ff);
815 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
816 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
817 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
818
819 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
820 writelfl((pp->crqb_dma & 0xffffffff) | index,
821 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
822 else
823 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
824
825 /*
826 * initialize response queue
827 */
828 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
829
830 WARN_ON(pp->crpb_dma & 0xff);
831 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
832
833 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
834 writelfl((pp->crpb_dma & 0xffffffff) | index,
835 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
836 else
837 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
838
839 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
840 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
841 }
842
843 /**
844 * mv_start_dma - Enable eDMA engine
845 * @ap: ATA channel to manipulate; @port_mmio: port base address
846 * @pp: port private data; @protocol: taskfile protocol of the new command
847 *
848 * Verify the local cache of the eDMA state is accurate with a
849 * WARN_ON.
850 *
851 * LOCKING:
852 * Inherited from caller.
853 */
854 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
855 struct mv_port_priv *pp, u8 protocol)
856 {
857 int want_ncq = (protocol == ATA_PROT_NCQ);
858
859 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
860 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
861 if (want_ncq != using_ncq)
862 __mv_stop_dma(ap);
863 }
864 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
865 struct mv_host_priv *hpriv = ap->host->private_data;
866 int hard_port = mv_hardport_from_port(ap->port_no);
867 void __iomem *hc_mmio = mv_hc_base_from_port(
868 mv_host_base(ap->host), hard_port);
869 u32 hc_irq_cause, ipending;
870
871 /* clear EDMA event indicators, if any */
872 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
873
874 /* clear EDMA interrupt indicator, if any */
875 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
876 ipending = (DEV_IRQ << hard_port) |
877 (CRPB_DMA_DONE << hard_port);
878 if (hc_irq_cause & ipending) {
879 writelfl(hc_irq_cause & ~ipending,
880 hc_mmio + HC_IRQ_CAUSE_OFS);
881 }
882
883 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
884
885 /* clear FIS IRQ Cause */
886 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
887
888 mv_set_edma_ptrs(port_mmio, hpriv, pp);
889
890 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
891 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
892 }
893 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
894 }
895
896 /**
897 * __mv_stop_dma - Disable eDMA engine
898 * @ap: ATA channel to manipulate
899 *
900 * Verify the local cache of the eDMA state is accurate with a
901 * WARN_ON.
902 *
903 * LOCKING:
904 * Inherited from caller.
905 */
906 static int __mv_stop_dma(struct ata_port *ap)
907 {
908 void __iomem *port_mmio = mv_ap_base(ap);
909 struct mv_port_priv *pp = ap->private_data;
910 u32 reg;
911 int i, err = 0;
912
913 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
914 /* Disable EDMA if active. The disable bit auto clears.
915 */
916 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
917 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
918 } else {
919 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
920 }
921
922 /* now properly wait for the eDMA to stop */
923 for (i = 1000; i > 0; i--) {
924 reg = readl(port_mmio + EDMA_CMD_OFS);
925 if (!(reg & EDMA_EN))
926 break;
927
928 udelay(100);
929 }
930
931 if (reg & EDMA_EN) {
932 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
933 err = -EIO;
934 }
935
936 return err;
937 }
938
939 static int mv_stop_dma(struct ata_port *ap)
940 {
941 unsigned long flags;
942 int rc;
943
944 spin_lock_irqsave(&ap->host->lock, flags);
945 rc = __mv_stop_dma(ap);
946 spin_unlock_irqrestore(&ap->host->lock, flags);
947
948 return rc;
949 }
950
951 #ifdef ATA_DEBUG
952 static void mv_dump_mem(void __iomem *start, unsigned bytes)
953 {
954 int b, w;
955 for (b = 0; b < bytes; ) {
956 DPRINTK("%p: ", start + b);
957 for (w = 0; b < bytes && w < 4; w++) {
958 printk("%08x ", readl(start + b));
959 b += sizeof(u32);
960 }
961 printk("\n");
962 }
963 }
964 #endif
965
966 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
967 {
968 #ifdef ATA_DEBUG
969 int b, w;
970 u32 dw;
971 for (b = 0; b < bytes; ) {
972 DPRINTK("%02x: ", b);
973 for (w = 0; b < bytes && w < 4; w++) {
974 (void) pci_read_config_dword(pdev, b, &dw);
975 printk("%08x ", dw);
976 b += sizeof(u32);
977 }
978 printk("\n");
979 }
980 #endif
981 }
982 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
983 struct pci_dev *pdev)
984 {
985 #ifdef ATA_DEBUG
986 void __iomem *hc_base = mv_hc_base(mmio_base,
987 port >> MV_PORT_HC_SHIFT);
988 void __iomem *port_base;
989 int start_port, num_ports, p, start_hc, num_hcs, hc;
990
991 if (0 > port) {
992 start_hc = start_port = 0;
993 num_ports = 8; /* should be benign for 4 port devs */
994 num_hcs = 2;
995 } else {
996 start_hc = port >> MV_PORT_HC_SHIFT;
997 start_port = port;
998 num_ports = num_hcs = 1;
999 }
1000 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
1001 num_ports > 1 ? num_ports - 1 : start_port);
1002
1003 if (NULL != pdev) {
1004 DPRINTK("PCI config space regs:\n");
1005 mv_dump_pci_cfg(pdev, 0x68);
1006 }
1007 DPRINTK("PCI regs:\n");
1008 mv_dump_mem(mmio_base+0xc00, 0x3c);
1009 mv_dump_mem(mmio_base+0xd00, 0x34);
1010 mv_dump_mem(mmio_base+0xf00, 0x4);
1011 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1012 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1013 hc_base = mv_hc_base(mmio_base, hc);
1014 DPRINTK("HC regs (HC %i):\n", hc);
1015 mv_dump_mem(hc_base, 0x1c);
1016 }
1017 for (p = start_port; p < start_port + num_ports; p++) {
1018 port_base = mv_port_base(mmio_base, p);
1019 DPRINTK("EDMA regs (port %i):\n", p);
1020 mv_dump_mem(port_base, 0x54);
1021 DPRINTK("SATA regs (port %i):\n", p);
1022 mv_dump_mem(port_base+0x300, 0x60);
1023 }
1024 #endif
1025 }
1026
1027 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1028 {
1029 unsigned int ofs;
1030
1031 switch (sc_reg_in) {
1032 case SCR_STATUS:
1033 case SCR_CONTROL:
1034 case SCR_ERROR:
1035 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1036 break;
1037 case SCR_ACTIVE:
1038 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1039 break;
1040 default:
1041 ofs = 0xffffffffU;
1042 break;
1043 }
1044 return ofs;
1045 }
1046
1047 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1048 {
1049 unsigned int ofs = mv_scr_offset(sc_reg_in);
1050
1051 if (ofs != 0xffffffffU) {
1052 *val = readl(mv_ap_base(ap) + ofs);
1053 return 0;
1054 } else
1055 return -EINVAL;
1056 }
1057
1058 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1059 {
1060 unsigned int ofs = mv_scr_offset(sc_reg_in);
1061
1062 if (ofs != 0xffffffffU) {
1063 writelfl(val, mv_ap_base(ap) + ofs);
1064 return 0;
1065 } else
1066 return -EINVAL;
1067 }
1068
1069 static void mv6_dev_config(struct ata_device *adev)
1070 {
1071 /*
1072 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1073 * See mv_qc_prep() for more info.
1074 */
1075 if (adev->flags & ATA_DFLAG_NCQ)
1076 if (adev->max_sectors > ATA_MAX_SECTORS)
1077 adev->max_sectors = ATA_MAX_SECTORS;
1078 }
1079
1080 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1081 void __iomem *port_mmio, int want_ncq)
1082 {
1083 u32 cfg;
1084
1085 /* set up non-NCQ EDMA configuration */
1086 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1087
1088 if (IS_GEN_I(hpriv))
1089 cfg |= (1 << 8); /* enab config burst size mask */
1090
1091 else if (IS_GEN_II(hpriv))
1092 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1093
1094 else if (IS_GEN_IIE(hpriv)) {
1095 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1096 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1097 cfg |= (1 << 18); /* enab early completion */
1098 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1099 }
1100
1101 if (want_ncq) {
1102 cfg |= EDMA_CFG_NCQ;
1103 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1104 } else
1105 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1106
1107 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1108 }
1109
1110 static void mv_port_free_dma_mem(struct ata_port *ap)
1111 {
1112 struct mv_host_priv *hpriv = ap->host->private_data;
1113 struct mv_port_priv *pp = ap->private_data;
1114 int tag;
1115
1116 if (pp->crqb) {
1117 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1118 pp->crqb = NULL;
1119 }
1120 if (pp->crpb) {
1121 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1122 pp->crpb = NULL;
1123 }
1124 /*
1125 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1126 * For later hardware, we have one unique sg_tbl per NCQ tag.
1127 */
1128 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1129 if (pp->sg_tbl[tag]) {
1130 if (tag == 0 || !IS_GEN_I(hpriv))
1131 dma_pool_free(hpriv->sg_tbl_pool,
1132 pp->sg_tbl[tag],
1133 pp->sg_tbl_dma[tag]);
1134 pp->sg_tbl[tag] = NULL;
1135 }
1136 }
1137 }
1138
1139 /**
1140 * mv_port_start - Port specific init/start routine.
1141 * @ap: ATA channel to manipulate
1142 *
1143 * Allocate and point to DMA memory, init port private memory,
1144 * zero indices.
1145 *
1146 * LOCKING:
1147 * Inherited from caller.
1148 */
1149 static int mv_port_start(struct ata_port *ap)
1150 {
1151 struct device *dev = ap->host->dev;
1152 struct mv_host_priv *hpriv = ap->host->private_data;
1153 struct mv_port_priv *pp;
1154 void __iomem *port_mmio = mv_ap_base(ap);
1155 unsigned long flags;
1156 int tag;
1157
1158 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1159 if (!pp)
1160 return -ENOMEM;
1161 ap->private_data = pp;
1162
1163 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1164 if (!pp->crqb)
1165 return -ENOMEM;
1166 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1167
1168 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1169 if (!pp->crpb)
1170 goto out_port_free_dma_mem;
1171 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1172
1173 /*
1174 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1175 * For later hardware, we need one unique sg_tbl per NCQ tag.
1176 */
1177 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1178 if (tag == 0 || !IS_GEN_I(hpriv)) {
1179 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1180 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1181 if (!pp->sg_tbl[tag])
1182 goto out_port_free_dma_mem;
1183 } else {
1184 pp->sg_tbl[tag] = pp->sg_tbl[0];
1185 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1186 }
1187 }
1188
1189 spin_lock_irqsave(&ap->host->lock, flags);
1190
1191 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1192 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1193
1194 spin_unlock_irqrestore(&ap->host->lock, flags);
1195
1196 /* Don't turn on EDMA here...do it before DMA commands only. Else
1197 * we'll be unable to send non-data, PIO, etc due to restricted access
1198 * to shadow regs.
1199 */
1200 return 0;
1201
1202 out_port_free_dma_mem:
1203 mv_port_free_dma_mem(ap);
1204 return -ENOMEM;
1205 }
1206
1207 /**
1208 * mv_port_stop - Port specific cleanup/stop routine.
1209 * @ap: ATA channel to manipulate
1210 *
1211 * Stop DMA, cleanup port memory.
1212 *
1213 * LOCKING:
1214 * This routine uses the host lock to protect the DMA stop.
1215 */
1216 static void mv_port_stop(struct ata_port *ap)
1217 {
1218 mv_stop_dma(ap);
1219 mv_port_free_dma_mem(ap);
1220 }
1221
1222 /**
1223 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1224 * @qc: queued command whose SG list to source from
1225 *
1226 * Populate the SG list and mark the last entry.
1227 *
1228 * LOCKING:
1229 * Inherited from caller.
1230 */
1231 static void mv_fill_sg(struct ata_queued_cmd *qc)
1232 {
1233 struct mv_port_priv *pp = qc->ap->private_data;
1234 struct scatterlist *sg;
1235 struct mv_sg *mv_sg, *last_sg = NULL;
1236 unsigned int si;
1237
1238 mv_sg = pp->sg_tbl[qc->tag];
1239 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1240 dma_addr_t addr = sg_dma_address(sg);
1241 u32 sg_len = sg_dma_len(sg);
1242
1243 while (sg_len) {
1244 u32 offset = addr & 0xffff;
1245 u32 len = sg_len;
1246
1247 if ((offset + sg_len > 0x10000))
1248 len = 0x10000 - offset;
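			/* e.g. a segment at addr 0x...f000 with sg_len 0x3000
			 * crosses a 64KB boundary: this pass emits an ePRD of
			 * 0x1000 bytes, and the loop then emits another ePRD
			 * for the remaining 0x2000 bytes.
			 */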
1249
1250 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1251 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1252 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1253
1254 sg_len -= len;
1255 addr += len;
1256
1257 last_sg = mv_sg;
1258 mv_sg++;
1259 }
1260 }
1261
1262 if (likely(last_sg))
1263 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1264 }
1265
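/* mv_crqb_pack_cmd builds one 16-bit CRQB command word: register data in
 * bits 7:0, the shadow register address in the field at CRQB_CMD_ADDR_SHIFT
 * (bit 8), CRQB_CMD_CS in bits 12:11, and CRQB_CMD_LAST in bit 15 to mark
 * the final word of the command sequence.
 */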
1266 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1267 {
1268 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1269 (last ? CRQB_CMD_LAST : 0);
1270 *cmdw = cpu_to_le16(tmp);
1271 }
1272
1273 /**
1274 * mv_qc_prep - Host specific command preparation.
1275 * @qc: queued command to prepare
1276 *
1277 * This routine simply redirects to the general purpose routine
1278 * if command is not DMA. Else, it handles prep of the CRQB
1279 * (command request block), does some sanity checking, and calls
1280 * the SG load routine.
1281 *
1282 * LOCKING:
1283 * Inherited from caller.
1284 */
1285 static void mv_qc_prep(struct ata_queued_cmd *qc)
1286 {
1287 struct ata_port *ap = qc->ap;
1288 struct mv_port_priv *pp = ap->private_data;
1289 __le16 *cw;
1290 struct ata_taskfile *tf;
1291 u16 flags = 0;
1292 unsigned in_index;
1293
1294 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1295 (qc->tf.protocol != ATA_PROT_NCQ))
1296 return;
1297
1298 /* Fill in command request block
1299 */
1300 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1301 flags |= CRQB_FLAG_READ;
1302 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1303 flags |= qc->tag << CRQB_TAG_SHIFT;
1304
1305 /* get current queue index from software */
1306 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1307
1308 pp->crqb[in_index].sg_addr =
1309 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1310 pp->crqb[in_index].sg_addr_hi =
1311 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1312 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1313
1314 cw = &pp->crqb[in_index].ata_cmd[0];
1315 tf = &qc->tf;
1316
1317 /* Sadly, the CRQB cannot accommodate all registers--there are
1318 * only 11 bytes...so we must pick and choose required
1319 * registers based on the command. So, we drop feature and
1320 * hob_feature for [RW] DMA commands, but they are needed for
1321 * NCQ. NCQ will drop hob_nsect.
1322 */
1323 switch (tf->command) {
1324 case ATA_CMD_READ:
1325 case ATA_CMD_READ_EXT:
1326 case ATA_CMD_WRITE:
1327 case ATA_CMD_WRITE_EXT:
1328 case ATA_CMD_WRITE_FUA_EXT:
1329 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1330 break;
1331 case ATA_CMD_FPDMA_READ:
1332 case ATA_CMD_FPDMA_WRITE:
1333 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1334 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1335 break;
1336 default:
1337 /* The only other commands EDMA supports in non-queued and
1338 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1339 * of which are defined/used by Linux. If we get here, this
1340 * driver needs work.
1341 *
1342 * FIXME: modify libata to give qc_prep a return value and
1343 * return error here.
1344 */
1345 BUG_ON(tf->command);
1346 break;
1347 }
1348 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1349 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1350 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1351 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1352 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1353 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1354 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1355 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1356 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1357
1358 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1359 return;
1360 mv_fill_sg(qc);
1361 }
1362
1363 /**
1364 * mv_qc_prep_iie - Host specific command preparation.
1365 * @qc: queued command to prepare
1366 *
1367 * This routine simply redirects to the general purpose routine
1368 * if command is not DMA. Else, it handles prep of the CRQB
1369 * (command request block), does some sanity checking, and calls
1370 * the SG load routine.
1371 *
1372 * LOCKING:
1373 * Inherited from caller.
1374 */
1375 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1376 {
1377 struct ata_port *ap = qc->ap;
1378 struct mv_port_priv *pp = ap->private_data;
1379 struct mv_crqb_iie *crqb;
1380 struct ata_taskfile *tf;
1381 unsigned in_index;
1382 u32 flags = 0;
1383
1384 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1385 (qc->tf.protocol != ATA_PROT_NCQ))
1386 return;
1387
1388 /* Fill in Gen IIE command request block
1389 */
1390 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1391 flags |= CRQB_FLAG_READ;
1392
1393 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1394 flags |= qc->tag << CRQB_TAG_SHIFT;
1395 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1396
1397 /* get current queue index from software */
1398 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1399
1400 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1401 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1402 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1403 crqb->flags = cpu_to_le32(flags);
1404
1405 tf = &qc->tf;
1406 crqb->ata_cmd[0] = cpu_to_le32(
1407 (tf->command << 16) |
1408 (tf->feature << 24)
1409 );
1410 crqb->ata_cmd[1] = cpu_to_le32(
1411 (tf->lbal << 0) |
1412 (tf->lbam << 8) |
1413 (tf->lbah << 16) |
1414 (tf->device << 24)
1415 );
1416 crqb->ata_cmd[2] = cpu_to_le32(
1417 (tf->hob_lbal << 0) |
1418 (tf->hob_lbam << 8) |
1419 (tf->hob_lbah << 16) |
1420 (tf->hob_feature << 24)
1421 );
1422 crqb->ata_cmd[3] = cpu_to_le32(
1423 (tf->nsect << 0) |
1424 (tf->hob_nsect << 8)
1425 );
1426
1427 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1428 return;
1429 mv_fill_sg(qc);
1430 }
1431
1432 /**
1433 * mv_qc_issue - Initiate a command to the host
1434 * @qc: queued command to start
1435 *
1436 * This routine simply redirects to the general purpose routine
1437 * if command is not DMA. Else, it sanity checks our local
1438 * caches of the request producer/consumer indices then enables
1439 * DMA and bumps the request producer index.
1440 *
1441 * LOCKING:
1442 * Inherited from caller.
1443 */
1444 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1445 {
1446 struct ata_port *ap = qc->ap;
1447 void __iomem *port_mmio = mv_ap_base(ap);
1448 struct mv_port_priv *pp = ap->private_data;
1449 u32 in_index;
1450
1451 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1452 (qc->tf.protocol != ATA_PROT_NCQ)) {
1453 /* We're about to send a non-EDMA capable command to the
1454 * port. Turn off EDMA so there won't be problems accessing
1455 * shadow block, etc registers.
1456 */
1457 __mv_stop_dma(ap);
1458 return ata_qc_issue_prot(qc);
1459 }
1460
1461 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1462
1463 pp->req_idx++;
1464
1465 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1466
1467 /* and write the request in pointer to kick the EDMA to life */
1468 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1469 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1470
1471 return 0;
1472 }
1473
1474 /**
1475 * mv_err_intr - Handle error interrupts on the port
1476 * @ap: ATA channel to manipulate
1477 * @qc: active queued command, or NULL if none
1478 *
1479 * In most cases, just clear the interrupt and move on. However,
1480 * some cases require an eDMA reset, which is done right before
1481 * the COMRESET in mv_phy_reset(). The SERR case requires a
1482 * clear of pending errors in the SATA SERROR register. Finally,
1483 * if the port disabled DMA, update our cached copy to match.
1484 *
1485 * LOCKING:
1486 * Inherited from caller.
1487 */
1488 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1489 {
1490 void __iomem *port_mmio = mv_ap_base(ap);
1491 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1492 struct mv_port_priv *pp = ap->private_data;
1493 struct mv_host_priv *hpriv = ap->host->private_data;
1494 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1495 unsigned int action = 0, err_mask = 0;
1496 struct ata_eh_info *ehi = &ap->link.eh_info;
1497
1498 ata_ehi_clear_desc(ehi);
1499
1500 if (!edma_enabled) {
1501 /* just a guess: do we need to do this? should we
1502 * expand this, and do it in all cases?
1503 */
1504 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1505 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1506 }
1507
1508 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1509
1510 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1511
1512 /*
1513 * all generations share these EDMA error cause bits
1514 */
1515
1516 if (edma_err_cause & EDMA_ERR_DEV)
1517 err_mask |= AC_ERR_DEV;
1518 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1519 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1520 EDMA_ERR_INTRL_PAR)) {
1521 err_mask |= AC_ERR_ATA_BUS;
1522 action |= ATA_EH_RESET;
1523 ata_ehi_push_desc(ehi, "parity error");
1524 }
1525 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1526 ata_ehi_hotplugged(ehi);
1527 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1528 "dev disconnect" : "dev connect");
1529 action |= ATA_EH_RESET;
1530 }
1531
1532 if (IS_GEN_I(hpriv)) {
1533 eh_freeze_mask = EDMA_EH_FREEZE_5;
1534
1535 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1536 pp = ap->private_data;
1537 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1538 ata_ehi_push_desc(ehi, "EDMA self-disable");
1539 }
1540 } else {
1541 eh_freeze_mask = EDMA_EH_FREEZE;
1542
1543 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1544 pp = ap->private_data;
1545 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1546 ata_ehi_push_desc(ehi, "EDMA self-disable");
1547 }
1548
1549 if (edma_err_cause & EDMA_ERR_SERR) {
1550 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1551 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1552 err_mask = AC_ERR_ATA_BUS;
1553 action |= ATA_EH_RESET;
1554 }
1555 }
1556
1557 /* Clear EDMA now that SERR cleanup done */
1558 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1559
1560 if (!err_mask) {
1561 err_mask = AC_ERR_OTHER;
1562 action |= ATA_EH_RESET;
1563 }
1564
1565 ehi->serror |= serr;
1566 ehi->action |= action;
1567
1568 if (qc)
1569 qc->err_mask |= err_mask;
1570 else
1571 ehi->err_mask |= err_mask;
1572
1573 if (edma_err_cause & eh_freeze_mask)
1574 ata_port_freeze(ap);
1575 else
1576 ata_port_abort(ap);
1577 }
1578
1579 static void mv_intr_pio(struct ata_port *ap)
1580 {
1581 struct ata_queued_cmd *qc;
1582 u8 ata_status;
1583
1584 /* ignore spurious intr if drive still BUSY */
1585 ata_status = readb(ap->ioaddr.status_addr);
1586 if (unlikely(ata_status & ATA_BUSY))
1587 return;
1588
1589 /* get active ATA command */
1590 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1591 if (unlikely(!qc)) /* no active tag */
1592 return;
1593 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1594 return;
1595
1596 /* and finally, complete the ATA command */
1597 qc->err_mask |= ac_err_mask(ata_status);
1598 ata_qc_complete(qc);
1599 }
1600
1601 static void mv_intr_edma(struct ata_port *ap)
1602 {
1603 void __iomem *port_mmio = mv_ap_base(ap);
1604 struct mv_host_priv *hpriv = ap->host->private_data;
1605 struct mv_port_priv *pp = ap->private_data;
1606 struct ata_queued_cmd *qc;
1607 u32 out_index, in_index;
1608 bool work_done = false;
1609
1610 /* get h/w response queue pointer */
1611 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1612 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1613
1614 while (1) {
1615 u16 status;
1616 unsigned int tag;
1617
1618 /* get s/w response queue last-read pointer, and compare */
1619 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1620 if (in_index == out_index)
1621 break;
1622
1623 /* 50xx: get active ATA command */
1624 if (IS_GEN_I(hpriv))
1625 tag = ap->link.active_tag;
1626
1627 /* Gen II/IIE: get active ATA command via tag, to enable
1628 * support for queueing. This works transparently for
1629 * queued and non-queued modes.
1630 */
1631 else
1632 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1633
1634 qc = ata_qc_from_tag(ap, tag);
1635
1636 /* For non-NCQ mode, the lower 8 bits of status
1637 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1638 * which should be zero if all went well.
1639 */
1640 status = le16_to_cpu(pp->crpb[out_index].flags);
1641 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1642 mv_err_intr(ap, qc);
1643 return;
1644 }
1645
1646 /* and finally, complete the ATA command */
1647 if (qc) {
1648 qc->err_mask |=
1649 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1650 ata_qc_complete(qc);
1651 }
1652
1653 /* advance software response queue pointer, to
1654 * indicate (after the loop completes) to hardware
1655 * that we have consumed a response queue entry.
1656 */
1657 work_done = true;
1658 pp->resp_idx++;
1659 }
1660
1661 if (work_done)
1662 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1663 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1664 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1665 }
1666
1667 /**
1668 * mv_host_intr - Handle all interrupts on the given host controller
1669 * @host: host specific structure
1670 * @relevant: port error bits relevant to this host controller
1671 * @hc: which host controller we're to look at
1672 *
1673 * Read then write clear the HC interrupt status then walk each
1674 * port connected to the HC and see if it needs servicing. Port
1675 * success ints are reported in the HC interrupt status reg, the
1676 * port error ints are reported in the higher level main
1677 * interrupt status register and thus are passed in via the
1678 * 'relevant' argument.
1679 *
1680 * LOCKING:
1681 * Inherited from caller.
1682 */
1683 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1684 {
1685 struct mv_host_priv *hpriv = host->private_data;
1686 void __iomem *mmio = hpriv->base;
1687 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1688 u32 hc_irq_cause;
1689 int port, port0, last_port;
1690
1691 if (hc == 0)
1692 port0 = 0;
1693 else
1694 port0 = MV_PORTS_PER_HC;
1695
1696 if (HAS_PCI(host))
1697 last_port = port0 + MV_PORTS_PER_HC;
1698 else
1699 last_port = port0 + hpriv->n_ports;
1700 /* we'll need the HC success int register in most cases */
1701 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1702 if (!hc_irq_cause)
1703 return;
1704
1705 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1706
1707 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1708 hc, relevant, hc_irq_cause);
1709
1710 for (port = port0; port < last_port; port++) {
1711 struct ata_port *ap = host->ports[port];
1712 struct mv_port_priv *pp;
1713 int have_err_bits, hard_port, shift;
1714
1715 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1716 continue;
1717
1718 pp = ap->private_data;
1719
1720 shift = port << 1; /* (port * 2) */
1721 if (port >= MV_PORTS_PER_HC) {
1722 shift++; /* skip bit 8 in the HC Main IRQ reg */
1723 }
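		/* e.g. port 5 (HC1, hard port 1) yields shift 11, so its
		 * PORT0_ERR/PORT0_DONE bits land at bits 11-12 of 'relevant',
		 * within HC1's bit 9-17 window of the main cause register.
		 */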
1724 have_err_bits = ((PORT0_ERR << shift) & relevant);
1725
1726 if (unlikely(have_err_bits)) {
1727 struct ata_queued_cmd *qc;
1728
1729 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1730 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1731 continue;
1732
1733 mv_err_intr(ap, qc);
1734 continue;
1735 }
1736
1737 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1738
1739 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1740 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1741 mv_intr_edma(ap);
1742 } else {
1743 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1744 mv_intr_pio(ap);
1745 }
1746 }
1747 VPRINTK("EXIT\n");
1748 }
1749
1750 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1751 {
1752 struct mv_host_priv *hpriv = host->private_data;
1753 struct ata_port *ap;
1754 struct ata_queued_cmd *qc;
1755 struct ata_eh_info *ehi;
1756 unsigned int i, err_mask, printed = 0;
1757 u32 err_cause;
1758
1759 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1760
1761 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1762 err_cause);
1763
1764 DPRINTK("All regs @ PCI error\n");
1765 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1766
1767 writelfl(0, mmio + hpriv->irq_cause_ofs);
1768
1769 for (i = 0; i < host->n_ports; i++) {
1770 ap = host->ports[i];
1771 if (!ata_link_offline(&ap->link)) {
1772 ehi = &ap->link.eh_info;
1773 ata_ehi_clear_desc(ehi);
1774 if (!printed++)
1775 ata_ehi_push_desc(ehi,
1776 "PCI err cause 0x%08x", err_cause);
1777 err_mask = AC_ERR_HOST_BUS;
1778 ehi->action = ATA_EH_RESET;
1779 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1780 if (qc)
1781 qc->err_mask |= err_mask;
1782 else
1783 ehi->err_mask |= err_mask;
1784
1785 ata_port_freeze(ap);
1786 }
1787 }
1788 }
1789
1790 /**
1791 * mv_interrupt - Main interrupt event handler
1792 * @irq: unused
1793 * @dev_instance: private data; in this case the host structure
1794 *
1795 * Read the read only register to determine if any host
1796 * controllers have pending interrupts. If so, call lower level
1797 * routine to handle. Also check for PCI errors which are only
1798 * reported here.
1799 *
1800 * LOCKING:
1801 * This routine holds the host lock while processing pending
1802 * interrupts.
1803 */
1804 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1805 {
1806 struct ata_host *host = dev_instance;
1807 struct mv_host_priv *hpriv = host->private_data;
1808 unsigned int hc, handled = 0, n_hcs;
1809 void __iomem *mmio = hpriv->base;
1810 u32 irq_stat, irq_mask;
1811
1812 spin_lock(&host->lock);
1813
1814 irq_stat = readl(hpriv->main_cause_reg_addr);
1815 irq_mask = readl(hpriv->main_mask_reg_addr);
1816
1817 /* check the cases where we either have nothing pending or have read
1818 * a bogus register value which can indicate HW removal or PCI fault
1819 */
1820 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1821 goto out_unlock;
1822
1823 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1824
1825 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1826 mv_pci_error(host, mmio);
1827 handled = 1;
1828 goto out_unlock; /* skip all other HC irq handling */
1829 }
1830
1831 for (hc = 0; hc < n_hcs; hc++) {
1832 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1833 if (relevant) {
1834 mv_host_intr(host, relevant, hc);
1835 handled = 1;
1836 }
1837 }
1838
1839 out_unlock:
1840 spin_unlock(&host->lock);
1841
1842 return IRQ_RETVAL(handled);
1843 }
1844
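/*
 * On Gen-I (50xx) parts the per-port PHY registers live inside the
 * host controller's register block, one 0x100-byte window per hard
 * port starting at offset 0x100 (hard port 0 -> +0x100, hard port 1
 * -> +0x200, and so on).
 */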
1845 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1846 {
1847 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1848 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1849
1850 return hc_mmio + ofs;
1851 }
1852
1853 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1854 {
1855 unsigned int ofs;
1856
1857 switch (sc_reg_in) {
1858 case SCR_STATUS:
1859 case SCR_ERROR:
1860 case SCR_CONTROL:
1861 ofs = sc_reg_in * sizeof(u32);
1862 break;
1863 default:
1864 ofs = 0xffffffffU;
1865 break;
1866 }
1867 return ofs;
1868 }
1869
1870 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1871 {
1872 struct mv_host_priv *hpriv = ap->host->private_data;
1873 void __iomem *mmio = hpriv->base;
1874 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1875 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1876
1877 if (ofs != 0xffffffffU) {
1878 *val = readl(addr + ofs);
1879 return 0;
1880 } else
1881 return -EINVAL;
1882 }
1883
1884 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1885 {
1886 struct mv_host_priv *hpriv = ap->host->private_data;
1887 void __iomem *mmio = hpriv->base;
1888 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1889 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1890
1891 if (ofs != 0xffffffffU) {
1892 writelfl(val, addr + ofs);
1893 return 0;
1894 } else
1895 return -EINVAL;
1896 }
1897
1898 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1899 {
1900 struct pci_dev *pdev = to_pci_dev(host->dev);
1901 int early_5080;
1902
1903 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1904
1905 if (!early_5080) {
1906 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1907 tmp |= (1 << 0);
1908 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1909 }
1910
1911 mv_reset_pci_bus(host, mmio);
1912 }
1913
1914 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1915 {
1916 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1917 }
1918
1919 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1920 void __iomem *mmio)
1921 {
1922 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1923 u32 tmp;
1924
1925 tmp = readl(phy_mmio + MV5_PHY_MODE);
1926
1927 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1928 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1929 }
1930
1931 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1932 {
1933 u32 tmp;
1934
1935 writel(0, mmio + MV_GPIO_PORT_CTL);
1936
1937 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1938
1939 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1940 tmp |= ~(1 << 0);
1941 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1942 }
1943
1944 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1945 unsigned int port)
1946 {
1947 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1948 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1949 u32 tmp;
1950 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1951
1952 if (fix_apm_sq) {
1953 tmp = readl(phy_mmio + MV5_LT_MODE);
1954 tmp |= (1 << 19);
1955 writel(tmp, phy_mmio + MV5_LT_MODE);
1956
1957 tmp = readl(phy_mmio + MV5_PHY_CTL);
1958 tmp &= ~0x3;
1959 tmp |= 0x1;
1960 writel(tmp, phy_mmio + MV5_PHY_CTL);
1961 }
1962
1963 tmp = readl(phy_mmio + MV5_PHY_MODE);
1964 tmp &= ~mask;
1965 tmp |= hpriv->signal[port].pre;
1966 tmp |= hpriv->signal[port].amps;
1967 writel(tmp, phy_mmio + MV5_PHY_MODE);
1968 }
1969
1970
1971 #undef ZERO
1972 #define ZERO(reg) writel(0, port_mmio + (reg))
1973 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1974 unsigned int port)
1975 {
1976 void __iomem *port_mmio = mv_port_base(mmio, port);
1977
1978 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1979
1980 mv_channel_reset(hpriv, mmio, port);
1981
1982 ZERO(0x028); /* command */
1983 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1984 ZERO(0x004); /* timer */
1985 ZERO(0x008); /* irq err cause */
1986 ZERO(0x00c); /* irq err mask */
1987 ZERO(0x010); /* rq bah */
1988 ZERO(0x014); /* rq inp */
1989 ZERO(0x018); /* rq outp */
1990 ZERO(0x01c); /* respq bah */
1991 ZERO(0x024); /* respq outp */
1992 ZERO(0x020); /* respq inp */
1993 ZERO(0x02c); /* test control */
1994 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1995 }
1996 #undef ZERO
1997
1998 #define ZERO(reg) writel(0, hc_mmio + (reg))
1999 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2000 unsigned int hc)
2001 {
2002 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2003 u32 tmp;
2004
2005 ZERO(0x00c);
2006 ZERO(0x010);
2007 ZERO(0x014);
2008 ZERO(0x018);
2009
2010 tmp = readl(hc_mmio + 0x20);
2011 tmp &= 0x1c1c1c1c;
2012 tmp |= 0x03030303;
2013 writel(tmp, hc_mmio + 0x20);
2014 }
2015 #undef ZERO
2016
2017 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2018 unsigned int n_hc)
2019 {
2020 unsigned int hc, port;
2021
2022 for (hc = 0; hc < n_hc; hc++) {
2023 for (port = 0; port < MV_PORTS_PER_HC; port++)
2024 mv5_reset_hc_port(hpriv, mmio,
2025 (hc * MV_PORTS_PER_HC) + port);
2026
2027 mv5_reset_one_hc(hpriv, mmio, hc);
2028 }
2029
2030 return 0;
2031 }
2032
2033 #undef ZERO
2034 #define ZERO(reg) writel(0, mmio + (reg))
2035 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2036 {
2037 struct mv_host_priv *hpriv = host->private_data;
2038 u32 tmp;
2039
2040 tmp = readl(mmio + MV_PCI_MODE);
2041 tmp &= 0xff00ffff;
2042 writel(tmp, mmio + MV_PCI_MODE);
2043
2044 ZERO(MV_PCI_DISC_TIMER);
2045 ZERO(MV_PCI_MSI_TRIGGER);
2046 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2047 ZERO(HC_MAIN_IRQ_MASK_OFS);
2048 ZERO(MV_PCI_SERR_MASK);
2049 ZERO(hpriv->irq_cause_ofs);
2050 ZERO(hpriv->irq_mask_ofs);
2051 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2052 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2053 ZERO(MV_PCI_ERR_ATTRIBUTE);
2054 ZERO(MV_PCI_ERR_COMMAND);
2055 }
2056 #undef ZERO
2057
2058 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2059 {
2060 u32 tmp;
2061
2062 mv5_reset_flash(hpriv, mmio);
2063
2064 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2065 tmp &= 0x3;
2066 tmp |= (1 << 5) | (1 << 6);
2067 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2068 }
2069
2070 /**
2071 * mv6_reset_hc - Perform the 6xxx global soft reset
2072 * @mmio: base address of the HBA
2073 *
2074 * This routine only applies to 6xxx parts.
2075 *
2076 * LOCKING:
2077 * Inherited from caller.
2078 */
2079 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2080 unsigned int n_hc)
2081 {
2082 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2083 int i, rc = 0;
2084 u32 t;
2085
2086 /* Follow the procedure defined in the PCI "main command and status
2087 * register" table.
2088 */
2089 t = readl(reg);
2090 writel(t | STOP_PCI_MASTER, reg);
2091
2092 for (i = 0; i < 1000; i++) {
2093 udelay(1);
2094 t = readl(reg);
2095 if (PCI_MASTER_EMPTY & t)
2096 break;
2097 }
2098 if (!(PCI_MASTER_EMPTY & t)) {
2099 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2100 rc = 1;
2101 goto done;
2102 }
2103
2104 /* set reset */
2105 i = 5;
2106 do {
2107 writel(t | GLOB_SFT_RST, reg);
2108 t = readl(reg);
2109 udelay(1);
2110 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2111
2112 if (!(GLOB_SFT_RST & t)) {
2113 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2114 rc = 1;
2115 goto done;
2116 }
2117
2118 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2119 i = 5;
2120 do {
2121 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2122 t = readl(reg);
2123 udelay(1);
2124 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2125
2126 if (GLOB_SFT_RST & t) {
2127 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2128 rc = 1;
2129 }
2130 done:
2131 return rc;
2132 }
2133
2134 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2135 void __iomem *mmio)
2136 {
2137 void __iomem *port_mmio;
2138 u32 tmp;
2139
2140 tmp = readl(mmio + MV_RESET_CFG);
2141 if ((tmp & (1 << 0)) == 0) {
2142 hpriv->signal[idx].amps = 0x7 << 8;
2143 hpriv->signal[idx].pre = 0x1 << 5;
2144 return;
2145 }
2146
2147 port_mmio = mv_port_base(mmio, idx);
2148 tmp = readl(port_mmio + PHY_MODE2);
2149
2150 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2151 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2152 }
2153
2154 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2155 {
2156 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2157 }
2158
2159 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2160 unsigned int port)
2161 {
2162 void __iomem *port_mmio = mv_port_base(mmio, port);
2163
2164 u32 hp_flags = hpriv->hp_flags;
2165 int fix_phy_mode2 =
2166 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2167 int fix_phy_mode4 =
2168 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2169 u32 m2, tmp;
2170
2171 if (fix_phy_mode2) {
2172 m2 = readl(port_mmio + PHY_MODE2);
2173 m2 &= ~(1 << 16);
2174 m2 |= (1 << 31);
2175 writel(m2, port_mmio + PHY_MODE2);
2176
2177 udelay(200);
2178
2179 m2 = readl(port_mmio + PHY_MODE2);
2180 m2 &= ~((1 << 16) | (1 << 31));
2181 writel(m2, port_mmio + PHY_MODE2);
2182
2183 udelay(200);
2184 }
2185
2186 /* who knows what this magic does */
2187 tmp = readl(port_mmio + PHY_MODE3);
2188 tmp &= ~0x7F800000;
2189 tmp |= 0x2A800000;
2190 writel(tmp, port_mmio + PHY_MODE3);
2191
2192 if (fix_phy_mode4) {
2193 u32 m4;
2194
2195 m4 = readl(port_mmio + PHY_MODE4);
2196
2197 if (hp_flags & MV_HP_ERRATA_60X1B2)
2198 tmp = readl(port_mmio + 0x310);
2199
2200 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2201
2202 writel(m4, port_mmio + PHY_MODE4);
2203
2204 if (hp_flags & MV_HP_ERRATA_60X1B2)
2205 writel(tmp, port_mmio + 0x310);
2206 }
2207
2208 /* Revert values of pre-emphasis and signal amps to the saved ones */
2209 m2 = readl(port_mmio + PHY_MODE2);
2210
2211 m2 &= ~MV_M2_PREAMP_MASK;
2212 m2 |= hpriv->signal[port].amps;
2213 m2 |= hpriv->signal[port].pre;
2214 m2 &= ~(1 << 16);
2215
2216 /* according to mvSata 3.6.1, some IIE values are fixed */
2217 if (IS_GEN_IIE(hpriv)) {
2218 m2 &= ~0xC30FF01F;
2219 m2 |= 0x0000900F;
2220 }
2221
2222 writel(m2, port_mmio + PHY_MODE2);
2223 }
2224
2225 /* TODO: use the generic LED interface to configure the SATA Presence */
2226 /* & Activity LEDs on the board */
2227 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2228 void __iomem *mmio)
2229 {
2230 return;
2231 }
2232
2233 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2234 void __iomem *mmio)
2235 {
2236 void __iomem *port_mmio;
2237 u32 tmp;
2238
2239 port_mmio = mv_port_base(mmio, idx);
2240 tmp = readl(port_mmio + PHY_MODE2);
2241
2242 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2243 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2244 }
2245
2246 #undef ZERO
2247 #define ZERO(reg) writel(0, port_mmio + (reg))
2248 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2249 void __iomem *mmio, unsigned int port)
2250 {
2251 void __iomem *port_mmio = mv_port_base(mmio, port);
2252
2253 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2254
2255 mv_channel_reset(hpriv, mmio, port);
2256
2257 ZERO(0x028); /* command */
2258 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2259 ZERO(0x004); /* timer */
2260 ZERO(0x008); /* irq err cause */
2261 ZERO(0x00c); /* irq err mask */
2262 ZERO(0x010); /* rq bah */
2263 ZERO(0x014); /* rq inp */
2264 ZERO(0x018); /* rq outp */
2265 ZERO(0x01c); /* respq bah */
2266 ZERO(0x024); /* respq outp */
2267 ZERO(0x020); /* respq inp */
2268 ZERO(0x02c); /* test control */
2269 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2270 }
2271
2272 #undef ZERO
2273
2274 #define ZERO(reg) writel(0, hc_mmio + (reg))
2275 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2276 void __iomem *mmio)
2277 {
2278 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2279
2280 ZERO(0x00c);
2281 ZERO(0x010);
2282 ZERO(0x014);
2283
2284 }
2285
2286 #undef ZERO
2287
2288 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2289 void __iomem *mmio, unsigned int n_hc)
2290 {
2291 unsigned int port;
2292
2293 for (port = 0; port < hpriv->n_ports; port++)
2294 mv_soc_reset_hc_port(hpriv, mmio, port);
2295
2296 mv_soc_reset_one_hc(hpriv, mmio);
2297
2298 return 0;
2299 }
2300
2301 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2302 void __iomem *mmio)
2303 {
2304 return;
2305 }
2306
2307 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2308 {
2309 return;
2310 }
2311
2312 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2313 unsigned int port_no)
2314 {
2315 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2316
2317 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2318
2319 if (IS_GEN_II(hpriv)) {
2320 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2321 ifctl |= (1 << 7); /* enable gen2i speed */
2322 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2323 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2324 }
2325
2326 udelay(25); /* allow reset propagation */
2327
2328 /* Spec never mentions clearing the bit. Marvell's driver does
2329 * clear the bit, however.
2330 */
2331 writelfl(0, port_mmio + EDMA_CMD_OFS);
2332
2333 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2334
2335 if (IS_GEN_I(hpriv))
2336 mdelay(1);
2337 }
2338
2339 /**
2340 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2341 * @ap: ATA channel to manipulate
2342 *
2343 * Part of this is taken from __sata_phy_reset. Note that this
2344 * version sleeps (msleep), so it must not run at interrupt level.
2345 *
2346 * LOCKING:
2347 * Inherited from caller. This routine sleeps, so it may only be
2348 * called from a context that can sleep (e.g. libata EH).
2349 */
2350 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2351 unsigned long deadline)
2352 {
2353 struct mv_port_priv *pp = ap->private_data;
2354 struct mv_host_priv *hpriv = ap->host->private_data;
2355 void __iomem *port_mmio = mv_ap_base(ap);
2356 int retry = 5;
2357 u32 sstatus;
2358
2359 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2360
2361 #ifdef DEBUG
2362 {
2363 u32 sstatus, serror, scontrol;
2364
2365 mv_scr_read(ap, SCR_STATUS, &sstatus);
2366 mv_scr_read(ap, SCR_ERROR, &serror);
2367 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2368 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2369 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2370 }
2371 #endif
2372
2373 /* Issue COMRESET via SControl: write DET=1 (0x301) to start the reset, then DET=0 (0x300) to release it */
2374 comreset_retry:
2375 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2376 msleep(1);
2377
2378 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2379 msleep(20);
2380
2381 do {
2382 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2383 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2384 break;
2385
2386 msleep(1);
2387 } while (time_before(jiffies, deadline));
2388
2389 /* work around errata */
2390 if (IS_GEN_II(hpriv) &&
2391 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2392 (retry-- > 0))
2393 goto comreset_retry;
2394
2395 #ifdef DEBUG
2396 {
2397 u32 sstatus, serror, scontrol;
2398
2399 mv_scr_read(ap, SCR_STATUS, &sstatus);
2400 mv_scr_read(ap, SCR_ERROR, &serror);
2401 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2402 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2403 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2404 }
2405 #endif
2406
2407 if (ata_link_offline(&ap->link)) {
2408 *class = ATA_DEV_NONE;
2409 return;
2410 }
2411
2412 /* Even after SStatus reflects that the device is ready,
2413 * it can take a while for the link to become fully
2414 * established (and thus for Status to stop reading 0x80/0x7F),
2415 * so poll briefly for that here.
2416 */
2417 retry = 20;
2418 while (1) {
2419 u8 drv_stat = ata_check_status(ap);
2420 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2421 break;
2422 msleep(500);
2423 if (retry-- <= 0)
2424 break;
2425 if (time_after(jiffies, deadline))
2426 break;
2427 }
2428
2429 /* FIXME: if we passed the deadline, the following
2430 * code probably produces an invalid result
2431 */
2432
2433 /* finally, read device signature from TF registers */
2434 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2435
2436 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2437
2438 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2439
2440 VPRINTK("EXIT\n");
2441 }
2442
2443 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2444 {
2445 struct ata_port *ap = link->ap;
2446 struct mv_port_priv *pp = ap->private_data;
2447
2448 mv_stop_dma(ap);
2449
2450 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
2451 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2452
2453 return 0;
2454 }
2455
2456 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2457 unsigned long deadline)
2458 {
2459 struct ata_port *ap = link->ap;
2460 struct mv_host_priv *hpriv = ap->host->private_data;
2461 void __iomem *mmio = hpriv->base;
2462
2463 mv_stop_dma(ap);
2464
2465 mv_channel_reset(hpriv, mmio, ap->port_no);
2466
2467 mv_phy_reset(ap, class, deadline);
2468
2469 return 0;
2470 }
2471
2472 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2473 {
2474 struct ata_port *ap = link->ap;
2475 u32 serr;
2476
2477 /* print link status */
2478 sata_print_link_status(link);
2479
2480 /* clear SError */
2481 sata_scr_read(link, SCR_ERROR, &serr);
2482 sata_scr_write_flush(link, SCR_ERROR, serr);
2483
2484 /* bail out if no device is present */
2485 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2486 DPRINTK("EXIT, no device\n");
2487 return;
2488 }
2489
2490 /* set up device control */
2491 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2492 }
2493
2494 static void mv_error_handler(struct ata_port *ap)
2495 {
2496 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2497 mv_hardreset, mv_postreset);
2498 }
2499
2500 static void mv_eh_freeze(struct ata_port *ap)
2501 {
2502 struct mv_host_priv *hpriv = ap->host->private_data;
2503 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2504 u32 tmp, mask;
2505 unsigned int shift;
2506
2507 /* FIXME: handle coalescing completion events properly */
2508
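/* Two bits (err, done) per port in the main mask register; ports on
 * the second HC skip bit 8, as in mv_host_intr().
 */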
2509 shift = ap->port_no * 2;
2510 if (hc > 0)
2511 shift++;
2512
2513 mask = 0x3 << shift;
2514
2515 /* disable assertion of portN err, done events */
2516 tmp = readl(hpriv->main_mask_reg_addr);
2517 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2518 }
2519
2520 static void mv_eh_thaw(struct ata_port *ap)
2521 {
2522 struct mv_host_priv *hpriv = ap->host->private_data;
2523 void __iomem *mmio = hpriv->base;
2524 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2525 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2526 void __iomem *port_mmio = mv_ap_base(ap);
2527 u32 tmp, mask, hc_irq_cause;
2528 unsigned int shift, hc_port_no = ap->port_no;
2529
2530 /* FIXME: handle coalescing completion events properly */
2531
2532 shift = ap->port_no * 2;
2533 if (hc > 0) {
2534 shift++;
2535 hc_port_no -= 4;
2536 }
2537
2538 mask = 0x3 << shift;
2539
2540 /* clear EDMA errors on this port */
2541 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2542
2543 /* clear pending irq events */
2544 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2545 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2546 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2547 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2548
2549 /* enable assertion of portN err, done events */
2550 tmp = readl(hpriv->main_mask_reg_addr);
2551 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2552 }
2553
2554 /**
2555 * mv_port_init - Perform some early initialization on a single port.
2556 * @port: libata data structure storing shadow register addresses
2557 * @port_mmio: base address of the port
2558 *
2559 * Initialize shadow register mmio addresses, clear outstanding
2560 * interrupts on the port, and unmask interrupts for the future
2561 * start of the port.
2562 *
2563 * LOCKING:
2564 * Inherited from caller.
2565 */
2566 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2567 {
2568 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2569 unsigned serr_ofs;
2570
2571 /* PIO related setup: taskfile shadow registers are laid out as
2572 * 32-bit-spaced copies inside the port's shadow register block. */
2573 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2574 port->error_addr =
2575 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2576 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2577 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2578 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2579 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2580 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2581 port->status_addr =
2582 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2583 /* special case: control/altstatus doesn't have ATA_REG_ address */
2584 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2585
2586 /* unused: */
2587 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2588
2589 /* Clear any currently outstanding port interrupt conditions */
2590 serr_ofs = mv_scr_offset(SCR_ERROR);
2591 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2592 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2593
2594 /* unmask all non-transient EDMA error interrupts */
2595 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2596
2597 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2598 readl(port_mmio + EDMA_CFG_OFS),
2599 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2600 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2601 }
2602
2603 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2604 {
2605 struct pci_dev *pdev = to_pci_dev(host->dev);
2606 struct mv_host_priv *hpriv = host->private_data;
2607 u32 hp_flags = hpriv->hp_flags;
2608
2609 switch (board_idx) {
2610 case chip_5080:
2611 hpriv->ops = &mv5xxx_ops;
2612 hp_flags |= MV_HP_GEN_I;
2613
2614 switch (pdev->revision) {
2615 case 0x1:
2616 hp_flags |= MV_HP_ERRATA_50XXB0;
2617 break;
2618 case 0x3:
2619 hp_flags |= MV_HP_ERRATA_50XXB2;
2620 break;
2621 default:
2622 dev_printk(KERN_WARNING, &pdev->dev,
2623 "Applying 50XXB2 workarounds to unknown rev\n");
2624 hp_flags |= MV_HP_ERRATA_50XXB2;
2625 break;
2626 }
2627 break;
2628
2629 case chip_504x:
2630 case chip_508x:
2631 hpriv->ops = &mv5xxx_ops;
2632 hp_flags |= MV_HP_GEN_I;
2633
2634 switch (pdev->revision) {
2635 case 0x0:
2636 hp_flags |= MV_HP_ERRATA_50XXB0;
2637 break;
2638 case 0x3:
2639 hp_flags |= MV_HP_ERRATA_50XXB2;
2640 break;
2641 default:
2642 dev_printk(KERN_WARNING, &pdev->dev,
2643 "Applying B2 workarounds to unknown rev\n");
2644 hp_flags |= MV_HP_ERRATA_50XXB2;
2645 break;
2646 }
2647 break;
2648
2649 case chip_604x:
2650 case chip_608x:
2651 hpriv->ops = &mv6xxx_ops;
2652 hp_flags |= MV_HP_GEN_II;
2653
2654 switch (pdev->revision) {
2655 case 0x7:
2656 hp_flags |= MV_HP_ERRATA_60X1B2;
2657 break;
2658 case 0x9:
2659 hp_flags |= MV_HP_ERRATA_60X1C0;
2660 break;
2661 default:
2662 dev_printk(KERN_WARNING, &pdev->dev,
2663 "Applying B2 workarounds to unknown rev\n");
2664 hp_flags |= MV_HP_ERRATA_60X1B2;
2665 break;
2666 }
2667 break;
2668
2669 case chip_7042:
2670 hp_flags |= MV_HP_PCIE;
2671 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2672 (pdev->device == 0x2300 || pdev->device == 0x2310))
2673 {
2674 /*
2675 * Highpoint RocketRAID PCIe 23xx series cards:
2676 *
2677 * Unconfigured drives are treated as "Legacy"
2678 * by the BIOS, and it overwrites sector 8 with
2679 * a "Lgcy" metadata block prior to Linux boot.
2680 *
2681 * Configured drives (RAID or JBOD) leave sector 8
2682 * alone, but instead overwrite a high numbered
2683 * sector for the RAID metadata. This sector can
2684 * be determined exactly, by truncating the physical
2685 * drive capacity to a nice even GB value.
2686 *
2687 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2688 *
2689 * Warn the user, lest they think we're just buggy.
2690 */
2691 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2692 " BIOS CORRUPTS DATA on all attached drives,"
2693 " regardless of if/how they are configured."
2694 " BEWARE!\n");
2695 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2696 " use sectors 8-9 on \"Legacy\" drives,"
2697 " and avoid the final two gigabytes on"
2698 " all RocketRAID BIOS initialized drives.\n");
2699 }
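/* fall through: the 7042 is otherwise treated as a Gen-IIE (6042-class) part */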
2700 case chip_6042:
2701 hpriv->ops = &mv6xxx_ops;
2702 hp_flags |= MV_HP_GEN_IIE;
2703
2704 switch (pdev->revision) {
2705 case 0x0:
2706 hp_flags |= MV_HP_ERRATA_XX42A0;
2707 break;
2708 case 0x1:
2709 hp_flags |= MV_HP_ERRATA_60X1C0;
2710 break;
2711 default:
2712 dev_printk(KERN_WARNING, &pdev->dev,
2713 "Applying 60X1C0 workarounds to unknown rev\n");
2714 hp_flags |= MV_HP_ERRATA_60X1C0;
2715 break;
2716 }
2717 break;
2718 case chip_soc:
2719 hpriv->ops = &mv_soc_ops;
2720 hp_flags |= MV_HP_ERRATA_60X1C0;
2721 break;
2722
2723 default:
2724 dev_printk(KERN_ERR, host->dev,
2725 "BUG: invalid board index %u\n", board_idx);
2726 return 1;
2727 }
2728
2729 hpriv->hp_flags = hp_flags;
2730 if (hp_flags & MV_HP_PCIE) {
2731 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2732 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2733 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2734 } else {
2735 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2736 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2737 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2738 }
2739
2740 return 0;
2741 }
2742
2743 /**
2744 * mv_init_host - Perform some early initialization of the host.
2745 * @host: ATA host to initialize
2746 * @board_idx: controller index
2747 *
2748 * If possible, do an early global reset of the host. Then do
2749 * our port init and clear/unmask all/relevant host interrupts.
2750 *
2751 * LOCKING:
2752 * Inherited from caller.
2753 */
2754 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2755 {
2756 int rc = 0, n_hc, port, hc;
2757 struct mv_host_priv *hpriv = host->private_data;
2758 void __iomem *mmio = hpriv->base;
2759
2760 rc = mv_chip_id(host, board_idx);
2761 if (rc)
2762 goto done;
2763
2764 if (HAS_PCI(host)) {
2765 hpriv->main_cause_reg_addr = hpriv->base +
2766 HC_MAIN_IRQ_CAUSE_OFS;
2767 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2768 } else {
2769 hpriv->main_cause_reg_addr = hpriv->base +
2770 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2771 hpriv->main_mask_reg_addr = hpriv->base +
2772 HC_SOC_MAIN_IRQ_MASK_OFS;
2773 }
2774 /* global interrupt mask */
2775 writel(0, hpriv->main_mask_reg_addr);
2776
2777 n_hc = mv_get_hc_count(host->ports[0]->flags);
2778
2779 for (port = 0; port < host->n_ports; port++)
2780 hpriv->ops->read_preamp(hpriv, port, mmio);
2781
2782 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2783 if (rc)
2784 goto done;
2785
2786 hpriv->ops->reset_flash(hpriv, mmio);
2787 hpriv->ops->reset_bus(host, mmio);
2788 hpriv->ops->enable_leds(hpriv, mmio);
2789
2790 for (port = 0; port < host->n_ports; port++) {
2791 if (IS_GEN_II(hpriv)) {
2792 void __iomem *port_mmio = mv_port_base(mmio, port);
2793
2794 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2795 ifctl |= (1 << 7); /* enable gen2i speed */
2796 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2797 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2798 }
2799
2800 hpriv->ops->phy_errata(hpriv, mmio, port);
2801 }
2802
2803 for (port = 0; port < host->n_ports; port++) {
2804 struct ata_port *ap = host->ports[port];
2805 void __iomem *port_mmio = mv_port_base(mmio, port);
2806
2807 mv_port_init(&ap->ioaddr, port_mmio);
2808
2809 #ifdef CONFIG_PCI
2810 if (HAS_PCI(host)) {
2811 unsigned int offset = port_mmio - mmio;
2812 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2813 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2814 }
2815 #endif
2816 }
2817
2818 for (hc = 0; hc < n_hc; hc++) {
2819 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2820
2821 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2822 "(before clear)=0x%08x\n", hc,
2823 readl(hc_mmio + HC_CFG_OFS),
2824 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2825
2826 /* Clear any currently outstanding hc interrupt conditions */
2827 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2828 }
2829
2830 if (HAS_PCI(host)) {
2831 /* Clear any currently outstanding host interrupt conditions */
2832 writelfl(0, mmio + hpriv->irq_cause_ofs);
2833
2834 /* and unmask interrupt generation for host regs */
2835 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2836 if (IS_GEN_I(hpriv))
2837 writelfl(~HC_MAIN_MASKED_IRQS_5,
2838 hpriv->main_mask_reg_addr);
2839 else
2840 writelfl(~HC_MAIN_MASKED_IRQS,
2841 hpriv->main_mask_reg_addr);
2842
2843 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2844 "PCI int cause/mask=0x%08x/0x%08x\n",
2845 readl(hpriv->main_cause_reg_addr),
2846 readl(hpriv->main_mask_reg_addr),
2847 readl(mmio + hpriv->irq_cause_ofs),
2848 readl(mmio + hpriv->irq_mask_ofs));
2849 } else {
2850 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2851 hpriv->main_mask_reg_addr);
2852 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2853 readl(hpriv->main_cause_reg_addr),
2854 readl(hpriv->main_mask_reg_addr));
2855 }
2856 done:
2857 return rc;
2858 }
2859
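/*
 * All three pools are device-managed (dmam_pool_create), so the early
 * -ENOMEM returns below are safe: anything already allocated is freed
 * automatically when the device is released.
 */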
2860 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2861 {
2862 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2863 MV_CRQB_Q_SZ, 0);
2864 if (!hpriv->crqb_pool)
2865 return -ENOMEM;
2866
2867 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2868 MV_CRPB_Q_SZ, 0);
2869 if (!hpriv->crpb_pool)
2870 return -ENOMEM;
2871
2872 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2873 MV_SG_TBL_SZ, 0);
2874 if (!hpriv->sg_tbl_pool)
2875 return -ENOMEM;
2876
2877 return 0;
2878 }
2879
2880 /**
2881 * mv_platform_probe - handle a positive probe of an SoC Marvell
2882 * host
2883 * @pdev: platform device found
2884 *
2885 * LOCKING:
2886 * Inherited from caller.
2887 */
2888 static int mv_platform_probe(struct platform_device *pdev)
2889 {
2890 static int printed_version;
2891 const struct mv_sata_platform_data *mv_platform_data;
2892 const struct ata_port_info *ppi[] =
2893 { &mv_port_info[chip_soc], NULL };
2894 struct ata_host *host;
2895 struct mv_host_priv *hpriv;
2896 struct resource *res;
2897 int n_ports, rc;
2898
2899 if (!printed_version++)
2900 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2901
2902 /*
2903 * Simple resource validation ..
2904 */
2905 if (unlikely(pdev->num_resources != 2)) {
2906 dev_err(&pdev->dev, "invalid number of resources\n");
2907 return -EINVAL;
2908 }
2909
2910 /*
2911 * Get the register base first
2912 */
2913 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2914 if (res == NULL)
2915 return -EINVAL;
2916
2917 /* allocate host */
2918 mv_platform_data = pdev->dev.platform_data;
2919 n_ports = mv_platform_data->n_ports;
2920
2921 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2922 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2923
2924 if (!host || !hpriv)
2925 return -ENOMEM;
2926 host->private_data = hpriv;
2927 hpriv->n_ports = n_ports;
2928
2929 host->iomap = NULL;
2930 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2931 res->end - res->start + 1);
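/*
 * The platform resource maps the SATAHC register block, while the
 * register offsets used throughout this driver are relative to the
 * chip-wide window used on the PCI parts; biasing the base down by
 * MV_SATAHC0_REG_BASE keeps those shared offsets valid.
 */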
2932 hpriv->base -= MV_SATAHC0_REG_BASE;
2933
2934 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2935 if (rc)
2936 return rc;
2937
2938 /* initialize adapter */
2939 rc = mv_init_host(host, chip_soc);
2940 if (rc)
2941 return rc;
2942
2943 dev_printk(KERN_INFO, &pdev->dev,
2944 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2945 host->n_ports);
2946
2947 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2948 IRQF_SHARED, &mv6_sht);
2949 }
2950
2951 /*
2952 *
2953 * mv_platform_remove - unplug a platform interface
2954 * @pdev: platform device
2955 *
2956 * A platform bus SATA device has been unplugged. Perform the needed
2957 * cleanup. Also called on module unload for any active devices.
2958 */
2959 static int __devexit mv_platform_remove(struct platform_device *pdev)
2960 {
2961 struct device *dev = &pdev->dev;
2962 struct ata_host *host = dev_get_drvdata(dev);
2963
2964 ata_host_detach(host);
2965 return 0;
2966 }
2967
2968 static struct platform_driver mv_platform_driver = {
2969 .probe = mv_platform_probe,
2970 .remove = __devexit_p(mv_platform_remove),
2971 .driver = {
2972 .name = DRV_NAME,
2973 .owner = THIS_MODULE,
2974 },
2975 };
2976
2977
2978 #ifdef CONFIG_PCI
2979 static int mv_pci_init_one(struct pci_dev *pdev,
2980 const struct pci_device_id *ent);
2981
2982
2983 static struct pci_driver mv_pci_driver = {
2984 .name = DRV_NAME,
2985 .id_table = mv_pci_tbl,
2986 .probe = mv_pci_init_one,
2987 .remove = ata_pci_remove_one,
2988 };
2989
2990 /*
2991 * module options
2992 */
2993 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2994
2995
2996 /* move to PCI layer or libata core? */
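/*
 * Prefer 64-bit DMA; if the consistent (coherent) mask cannot also be
 * set to 64 bits, fall back to a 32-bit consistent mask. If the 64-bit
 * streaming mask fails entirely, run with 32-bit masks for both.
 */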
2997 static int pci_go_64(struct pci_dev *pdev)
2998 {
2999 int rc;
3000
3001 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3002 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3003 if (rc) {
3004 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3005 if (rc) {
3006 dev_printk(KERN_ERR, &pdev->dev,
3007 "64-bit DMA enable failed\n");
3008 return rc;
3009 }
3010 }
3011 } else {
3012 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3013 if (rc) {
3014 dev_printk(KERN_ERR, &pdev->dev,
3015 "32-bit DMA enable failed\n");
3016 return rc;
3017 }
3018 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3019 if (rc) {
3020 dev_printk(KERN_ERR, &pdev->dev,
3021 "32-bit consistent DMA enable failed\n");
3022 return rc;
3023 }
3024 }
3025
3026 return rc;
3027 }
3028
3029 /**
3030 * mv_print_info - Dump key info to kernel log for perusal.
3031 * @host: ATA host to print info about
3032 *
3033 * FIXME: complete this.
3034 *
3035 * LOCKING:
3036 * Inherited from caller.
3037 */
3038 static void mv_print_info(struct ata_host *host)
3039 {
3040 struct pci_dev *pdev = to_pci_dev(host->dev);
3041 struct mv_host_priv *hpriv = host->private_data;
3042 u8 scc;
3043 const char *scc_s, *gen;
3044
3045 /* Read the PCI class code so we can report whether the part
3046 * presents itself as a plain SCSI or a RAID controller.
3047 */
3048 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3049 if (scc == 0)
3050 scc_s = "SCSI";
3051 else if (scc == 0x01)
3052 scc_s = "RAID";
3053 else
3054 scc_s = "?";
3055
3056 if (IS_GEN_I(hpriv))
3057 gen = "I";
3058 else if (IS_GEN_II(hpriv))
3059 gen = "II";
3060 else if (IS_GEN_IIE(hpriv))
3061 gen = "IIE";
3062 else
3063 gen = "?";
3064
3065 dev_printk(KERN_INFO, &pdev->dev,
3066 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3067 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
3068 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3069 }
3070
3071 /**
3072 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
3073 * @pdev: PCI device found
3074 * @ent: PCI device ID entry for the matched host
3075 *
3076 * LOCKING:
3077 * Inherited from caller.
3078 */
3079 static int mv_pci_init_one(struct pci_dev *pdev,
3080 const struct pci_device_id *ent)
3081 {
3082 static int printed_version;
3083 unsigned int board_idx = (unsigned int)ent->driver_data;
3084 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3085 struct ata_host *host;
3086 struct mv_host_priv *hpriv;
3087 int n_ports, rc;
3088
3089 if (!printed_version++)
3090 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3091
3092 /* allocate host */
3093 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3094
3095 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3096 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3097 if (!host || !hpriv)
3098 return -ENOMEM;
3099 host->private_data = hpriv;
3100 hpriv->n_ports = n_ports;
3101
3102 /* acquire resources */
3103 rc = pcim_enable_device(pdev);
3104 if (rc)
3105 return rc;
3106
3107 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3108 if (rc == -EBUSY)
3109 pcim_pin_device(pdev);
3110 if (rc)
3111 return rc;
3112 host->iomap = pcim_iomap_table(pdev);
3113 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3114
3115 rc = pci_go_64(pdev);
3116 if (rc)
3117 return rc;
3118
3119 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3120 if (rc)
3121 return rc;
3122
3123 /* initialize adapter */
3124 rc = mv_init_host(host, board_idx);
3125 if (rc)
3126 return rc;
3127
3128 /* Enable interrupts */
3129 if (msi && pci_enable_msi(pdev))
3130 pci_intx(pdev, 1);
3131
3132 mv_dump_pci_cfg(pdev, 0x68);
3133 mv_print_info(host);
3134
3135 pci_set_master(pdev);
3136 pci_try_set_mwi(pdev);
3137 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3138 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3139 }
3140 #endif
3141
3142 static int mv_platform_probe(struct platform_device *pdev);
3143 static int __devexit mv_platform_remove(struct platform_device *pdev);
3144
3145 static int __init mv_init(void)
3146 {
3147 int rc = -ENODEV;
3148 #ifdef CONFIG_PCI
3149 rc = pci_register_driver(&mv_pci_driver);
3150 if (rc < 0)
3151 return rc;
3152 #endif
3153 rc = platform_driver_register(&mv_platform_driver);
3154
3155 #ifdef CONFIG_PCI
3156 if (rc < 0)
3157 pci_unregister_driver(&mv_pci_driver);
3158 #endif
3159 return rc;
3160 }
3161
3162 static void __exit mv_exit(void)
3163 {
3164 #ifdef CONFIG_PCI
3165 pci_unregister_driver(&mv_pci_driver);
3166 #endif
3167 platform_driver_unregister(&mv_platform_driver);
3168 }
3169
3170 MODULE_AUTHOR("Brett Russ");
3171 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3172 MODULE_LICENSE("GPL");
3173 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3174 MODULE_VERSION(DRV_VERSION);
3175 MODULE_ALIAS("platform:sata_mv");
3176
3177 #ifdef CONFIG_PCI
3178 module_param(msi, int, 0444);
3179 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
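/* e.g. load with MSI enabled:  modprobe sata_mv msi=1 */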
3180 #endif
3181
3182 module_init(mv_init);
3183 module_exit(mv_exit);