libata: implement and use ops inheritance
deliverable/linux.git: drivers/ata/sata_mv.c
1 /*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember that a couple of workarounds (one related to
30 PCI-X) are still needed.
31
32 2) Improve/fix IRQ and error handling sequences.
33
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
39
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41
42 6) Add port multiplier support (intermediate)
43
44 8) Develop a low-power-consumption strategy, and implement it.
45
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
48 like that.
49
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is not worth the
53 latency cost.
54
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
58
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
61
62 */
63
64
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
81
82 #define DRV_NAME "sata_mv"
83 #define DRV_VERSION "1.20"
84
85 enum {
86 /* BARs are enumerated in pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
93
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101
102 MV_SATAHC0_REG_BASE = 0x20000,
103 MV_FLASH_CTL = 0x1046c,
104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
106
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111
112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
114
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
121 MV_MAX_SG_CT = 256,
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
123
124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
128 MV_PORT_MASK = 3,
129
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
135
136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
140
141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
148
149 CRPB_FLAG_STATUS_SHIFT = 8,
150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
152
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
154
155 /* PCI interface registers */
156
157 PCI_COMMAND_OFS = 0xc00,
158
159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
163
164 MV_PCI_MODE = 0xd00,
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
174
175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
178
179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
182
183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
191 PCI_ERR = (1 << 18),
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 HC_MAIN_RSVD),
206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 HC_MAIN_RSVD_5),
208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
209
210 /* SATAHC registers */
211 HC_CFG_OFS = 0,
212
213 HC_IRQ_CAUSE_OFS = 0x14,
214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
217
218 /* Shadow block registers */
219 SHD_BLK_OFS = 0x100,
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
221
222 /* SATA registers */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
226 PHY_MODE3 = 0x310,
227 PHY_MODE4 = 0x314,
228 PHY_MODE2 = 0x330,
229 MV5_PHY_MODE = 0x74,
230 MV5_LT_MODE = 0x30,
231 MV5_PHY_CTL = 0x0C,
232 SATA_INTERFACE_CTL = 0x050,
233
234 MV_M2_PREAMP_MASK = 0x7e0,
235
236 /* Port registers */
237 EDMA_CFG_OFS = 0,
238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
243
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
260
261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
266
267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
268
269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
275
276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
277
278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
281
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
286
287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_SERR |
292 EDMA_ERR_SELF_DIS |
293 EDMA_ERR_CRQB_PAR |
294 EDMA_ERR_CRPB_PAR |
295 EDMA_ERR_INTRL_PAR |
296 EDMA_ERR_IORDY |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_OVERRUN_5 |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
308 EDMA_ERR_CRQB_PAR |
309 EDMA_ERR_CRPB_PAR |
310 EDMA_ERR_INTRL_PAR |
311 EDMA_ERR_IORDY,
312
313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
315
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
318
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
322 EDMA_RSP_Q_PTR_SHIFT = 3,
323
324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
328
329 EDMA_IORDY_TMOUT = 0x34,
330 EDMA_ARB_CFG = 0x38,
331
332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
338 MV_HP_ERRATA_XX42A0 = (1 << 5),
339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
343
344 /* Port private flags (pp_flags) */
345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
348 };
349
350 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
353 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
354
355 enum {
356 /* DMA boundary 0xffff is required by the s/g splitting
357 * we need on /length/ in mv_fill_sg().
358 */
359 MV_DMA_BOUNDARY = 0xffffU,
360
361 /* mask of register bits containing lower 32 bits
362 * of EDMA request queue DMA address
363 */
364 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
365
366 /* ditto, for response queue */
367 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
368 };
369
370 enum chip_type {
371 chip_504x,
372 chip_508x,
373 chip_5080,
374 chip_604x,
375 chip_608x,
376 chip_6042,
377 chip_7042,
378 chip_soc,
379 };
380
381 /* Command ReQuest Block: 32B */
382 struct mv_crqb {
383 __le32 sg_addr;
384 __le32 sg_addr_hi;
385 __le16 ctrl_flags;
386 __le16 ata_cmd[11];
387 };
388
389 struct mv_crqb_iie {
390 __le32 addr;
391 __le32 addr_hi;
392 __le32 flags;
393 __le32 len;
394 __le32 ata_cmd[4];
395 };
396
397 /* Command ResPonse Block: 8B */
398 struct mv_crpb {
399 __le16 id;
400 __le16 flags;
401 __le32 tmstmp;
402 };
403
404 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405 struct mv_sg {
406 __le32 addr;
407 __le32 flags_size;
408 __le32 addr_hi;
409 __le32 reserved;
410 };
411
412 struct mv_port_priv {
413 struct mv_crqb *crqb;
414 dma_addr_t crqb_dma;
415 struct mv_crpb *crpb;
416 dma_addr_t crpb_dma;
417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
419
420 unsigned int req_idx;
421 unsigned int resp_idx;
422
423 u32 pp_flags;
424 };
425
426 struct mv_port_signal {
427 u32 amps;
428 u32 pre;
429 };
430
431 struct mv_host_priv {
432 u32 hp_flags;
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
435 int n_ports;
436 void __iomem *base;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
439 u32 irq_cause_ofs;
440 u32 irq_mask_ofs;
441 u32 unmask_all_irqs;
442 /*
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
446 */
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
450 };
451
452 struct mv_hw_ops {
453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
462 };
463
464 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
465 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
466 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
467 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
468 static int mv_port_start(struct ata_port *ap);
469 static void mv_port_stop(struct ata_port *ap);
470 static void mv_qc_prep(struct ata_queued_cmd *qc);
471 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
472 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
473 static void mv_error_handler(struct ata_port *ap);
474 static void mv_eh_freeze(struct ata_port *ap);
475 static void mv_eh_thaw(struct ata_port *ap);
476 static void mv6_dev_config(struct ata_device *dev);
477
478 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
479 unsigned int port);
480 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
481 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
482 void __iomem *mmio);
483 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
484 unsigned int n_hc);
485 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
486 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
487
488 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
489 unsigned int port);
490 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
491 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
492 void __iomem *mmio);
493 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
494 unsigned int n_hc);
495 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
496 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
497 void __iomem *mmio);
498 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
499 void __iomem *mmio);
500 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
501 void __iomem *mmio, unsigned int n_hc);
502 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
503 void __iomem *mmio);
504 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
505 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
506 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
507 unsigned int port_no);
508 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
509 void __iomem *port_mmio, int want_ncq);
510 static int __mv_stop_dma(struct ata_port *ap);
511
512 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
513 * because we have to allow room for worst case splitting of
514 * PRDs for 64K boundaries in mv_fill_sg().
515 */
516 static struct scsi_host_template mv5_sht = {
517 ATA_BASE_SHT(DRV_NAME),
518 .sg_tablesize = MV_MAX_SG_CT / 2,
519 .dma_boundary = MV_DMA_BOUNDARY,
520 };
521
522 static struct scsi_host_template mv6_sht = {
523 ATA_NCQ_SHT(DRV_NAME),
524 .can_queue = MV_MAX_Q_DEPTH - 1,
525 .sg_tablesize = MV_MAX_SG_CT / 2,
526 .dma_boundary = MV_DMA_BOUNDARY,
527 };
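/*
 * A quick sanity check of the sg_tablesize choice above: MV_MAX_SG_CT is
 * 256, so each command owns a 256-entry ePRD table (MV_SG_TBL_SZ == 16 * 256
 * bytes).  Advertising only MV_MAX_SG_CT / 2 == 128 entries to the SCSI layer
 * leaves room for mv_fill_sg() to split every PRD once when it straddles a
 * 64K boundary, which is the worst case described in the comment above.
 */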
528
529 static struct ata_port_operations mv5_ops = {
530 .inherits = &ata_sff_port_ops,
531
532 .qc_prep = mv_qc_prep,
533 .qc_issue = mv_qc_issue,
534
535 .freeze = mv_eh_freeze,
536 .thaw = mv_eh_thaw,
537 .error_handler = mv_error_handler,
538 .post_internal_cmd = ATA_OP_NULL,
539
540 .scr_read = mv5_scr_read,
541 .scr_write = mv5_scr_write,
542
543 .port_start = mv_port_start,
544 .port_stop = mv_port_stop,
545 };
546
547 static struct ata_port_operations mv6_ops = {
548 .inherits = &mv5_ops,
549 .qc_defer = ata_std_qc_defer,
550 .dev_config = mv6_dev_config,
551 .scr_read = mv_scr_read,
552 .scr_write = mv_scr_write,
553 };
554
555 static struct ata_port_operations mv_iie_ops = {
556 .inherits = &mv6_ops,
557 .dev_config = ATA_OP_NULL,
558 .qc_prep = mv_qc_prep_iie,
559 };
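/*
 * The three port_operations structures above form an inheritance chain via
 * the libata ops-inheritance mechanism: mv5_ops starts from ata_sff_port_ops
 * and overrides the EDMA-aware hooks; mv6_ops inherits mv5_ops and swaps in
 * the Gen-II scr_read/scr_write plus qc_defer/dev_config; mv_iie_ops inherits
 * mv6_ops, masks dev_config back out with ATA_OP_NULL, and substitutes the
 * Gen-IIE qc_prep.  Any slot left unset is filled in from the parent when the
 * host is registered.
 */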
560
561 static const struct ata_port_info mv_port_info[] = {
562 { /* chip_504x */
563 .flags = MV_COMMON_FLAGS,
564 .pio_mask = 0x1f, /* pio0-4 */
565 .udma_mask = ATA_UDMA6,
566 .port_ops = &mv5_ops,
567 },
568 { /* chip_508x */
569 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
570 .pio_mask = 0x1f, /* pio0-4 */
571 .udma_mask = ATA_UDMA6,
572 .port_ops = &mv5_ops,
573 },
574 { /* chip_5080 */
575 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
576 .pio_mask = 0x1f, /* pio0-4 */
577 .udma_mask = ATA_UDMA6,
578 .port_ops = &mv5_ops,
579 },
580 { /* chip_604x */
581 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
582 ATA_FLAG_NCQ,
583 .pio_mask = 0x1f, /* pio0-4 */
584 .udma_mask = ATA_UDMA6,
585 .port_ops = &mv6_ops,
586 },
587 { /* chip_608x */
588 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
589 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
590 .pio_mask = 0x1f, /* pio0-4 */
591 .udma_mask = ATA_UDMA6,
592 .port_ops = &mv6_ops,
593 },
594 { /* chip_6042 */
595 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
596 ATA_FLAG_NCQ,
597 .pio_mask = 0x1f, /* pio0-4 */
598 .udma_mask = ATA_UDMA6,
599 .port_ops = &mv_iie_ops,
600 },
601 { /* chip_7042 */
602 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
603 ATA_FLAG_NCQ,
604 .pio_mask = 0x1f, /* pio0-4 */
605 .udma_mask = ATA_UDMA6,
606 .port_ops = &mv_iie_ops,
607 },
608 { /* chip_soc */
609 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
610 .pio_mask = 0x1f, /* pio0-4 */
611 .udma_mask = ATA_UDMA6,
612 .port_ops = &mv_iie_ops,
613 },
614 };
615
616 static const struct pci_device_id mv_pci_tbl[] = {
617 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
618 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
619 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
620 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
621 /* RocketRAID 1740/174x have different identifiers */
622 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
623 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
624
625 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
626 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
627 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
628 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
629 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
630
631 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
632
633 /* Adaptec 1430SA */
634 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
635
636 /* Marvell 7042 support */
637 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
638
639 /* Highpoint RocketRAID PCIe series */
640 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
641 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
642
643 { } /* terminate list */
644 };
645
646 static const struct mv_hw_ops mv5xxx_ops = {
647 .phy_errata = mv5_phy_errata,
648 .enable_leds = mv5_enable_leds,
649 .read_preamp = mv5_read_preamp,
650 .reset_hc = mv5_reset_hc,
651 .reset_flash = mv5_reset_flash,
652 .reset_bus = mv5_reset_bus,
653 };
654
655 static const struct mv_hw_ops mv6xxx_ops = {
656 .phy_errata = mv6_phy_errata,
657 .enable_leds = mv6_enable_leds,
658 .read_preamp = mv6_read_preamp,
659 .reset_hc = mv6_reset_hc,
660 .reset_flash = mv6_reset_flash,
661 .reset_bus = mv_reset_pci_bus,
662 };
663
664 static const struct mv_hw_ops mv_soc_ops = {
665 .phy_errata = mv6_phy_errata,
666 .enable_leds = mv_soc_enable_leds,
667 .read_preamp = mv_soc_read_preamp,
668 .reset_hc = mv_soc_reset_hc,
669 .reset_flash = mv_soc_reset_flash,
670 .reset_bus = mv_soc_reset_bus,
671 };
672
673 /*
674 * Functions
675 */
676
677 static inline void writelfl(unsigned long data, void __iomem *addr)
678 {
679 writel(data, addr);
680 (void) readl(addr); /* flush to avoid PCI posted write */
681 }
682
683 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
684 {
685 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
686 }
687
688 static inline unsigned int mv_hc_from_port(unsigned int port)
689 {
690 return port >> MV_PORT_HC_SHIFT;
691 }
692
693 static inline unsigned int mv_hardport_from_port(unsigned int port)
694 {
695 return port & MV_PORT_MASK;
696 }
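/*
 * Worked example of the port-number math above, using MV_PORT_HC_SHIFT == 2
 * and MV_PORT_MASK == 3: global port 6 lives on host controller 6 >> 2 == 1
 * as hard port 6 & 3 == 2, while port 3 is HC 0, hard port 3.
 */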
697
698 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
699 unsigned int port)
700 {
701 return mv_hc_base(base, mv_hc_from_port(port));
702 }
703
704 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
705 {
706 return mv_hc_base_from_port(base, port) +
707 MV_SATAHC_ARBTR_REG_SZ +
708 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
709 }
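/*
 * Putting the offsets together for one case: the port registers for global
 * port 6 sit at base + MV_SATAHC0_REG_BASE (0x20000) + 1 * MV_SATAHC_REG_SZ
 * (0x10000) + MV_SATAHC_ARBTR_REG_SZ (0x2000) + 2 * MV_PORT_REG_SZ (0x2000),
 * i.e. base + 0x36000.  This is just the arithmetic already encoded in the
 * helpers above, spelled out once.
 */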
710
711 static inline void __iomem *mv_host_base(struct ata_host *host)
712 {
713 struct mv_host_priv *hpriv = host->private_data;
714 return hpriv->base;
715 }
716
717 static inline void __iomem *mv_ap_base(struct ata_port *ap)
718 {
719 return mv_port_base(mv_host_base(ap->host), ap->port_no);
720 }
721
722 static inline int mv_get_hc_count(unsigned long port_flags)
723 {
724 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
725 }
726
727 static void mv_set_edma_ptrs(void __iomem *port_mmio,
728 struct mv_host_priv *hpriv,
729 struct mv_port_priv *pp)
730 {
731 u32 index;
732
733 /*
734 * initialize request queue
735 */
736 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
737
738 WARN_ON(pp->crqb_dma & 0x3ff);
739 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
740 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
741 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
742
743 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
744 writelfl((pp->crqb_dma & 0xffffffff) | index,
745 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
746 else
747 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
748
749 /*
750 * initialize response queue
751 */
752 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
753
754 WARN_ON(pp->crpb_dma & 0xff);
755 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
756
757 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
758 writelfl((pp->crpb_dma & 0xffffffff) | index,
759 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
760 else
761 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
762
763 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
764 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
765 }
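/*
 * Layout note for the queue pointer registers written above: with
 * MV_MAX_Q_DEPTH_MASK == 31 and EDMA_REQ_Q_PTR_SHIFT == 5, the request queue
 * index occupies bits 9:5 of the IN/OUT pointer registers, while bits 31:10
 * (EDMA_REQ_Q_BASE_LO_MASK) carry the low part of the 1KB-aligned queue base.
 * The response queue works the same way with a 3-bit shift and a 256B-aligned
 * base (EDMA_RSP_Q_BASE_LO_MASK).
 */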
766
767 /**
768 * mv_start_dma - Enable eDMA engine
769 * @ap: ATA channel to manipulate
770 * @pp: port private data
771 *
772 * Verify the local cache of the eDMA state is accurate with a
773 * WARN_ON.
774 *
775 * LOCKING:
776 * Inherited from caller.
777 */
778 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
779 struct mv_port_priv *pp, u8 protocol)
780 {
781 int want_ncq = (protocol == ATA_PROT_NCQ);
782
783 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
784 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
785 if (want_ncq != using_ncq)
786 __mv_stop_dma(ap);
787 }
788 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
789 struct mv_host_priv *hpriv = ap->host->private_data;
790 int hard_port = mv_hardport_from_port(ap->port_no);
791 void __iomem *hc_mmio = mv_hc_base_from_port(
792 mv_host_base(ap->host), hard_port);
793 u32 hc_irq_cause, ipending;
794
795 /* clear EDMA event indicators, if any */
796 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
797
798 /* clear EDMA interrupt indicator, if any */
799 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
800 ipending = (DEV_IRQ << hard_port) |
801 (CRPB_DMA_DONE << hard_port);
802 if (hc_irq_cause & ipending) {
803 writelfl(hc_irq_cause & ~ipending,
804 hc_mmio + HC_IRQ_CAUSE_OFS);
805 }
806
807 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
808
809 /* clear FIS IRQ Cause */
810 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
811
812 mv_set_edma_ptrs(port_mmio, hpriv, pp);
813
814 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
815 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
816 }
817 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
818 }
819
820 /**
821 * __mv_stop_dma - Disable eDMA engine
822 * @ap: ATA channel to manipulate
823 *
824 * Verify the local cache of the eDMA state is accurate with a
825 * WARN_ON.
826 *
827 * LOCKING:
828 * Inherited from caller.
829 */
830 static int __mv_stop_dma(struct ata_port *ap)
831 {
832 void __iomem *port_mmio = mv_ap_base(ap);
833 struct mv_port_priv *pp = ap->private_data;
834 u32 reg;
835 int i, err = 0;
836
837 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
838 /* Disable EDMA if active. The disable bit auto clears.
839 */
840 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
841 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
842 } else {
843 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
844 }
845
846 /* now properly wait for the eDMA to stop */
847 for (i = 1000; i > 0; i--) {
848 reg = readl(port_mmio + EDMA_CMD_OFS);
849 if (!(reg & EDMA_EN))
850 break;
851
852 udelay(100);
853 }
854
855 if (reg & EDMA_EN) {
856 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
857 err = -EIO;
858 }
859
860 return err;
861 }
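/*
 * The polling loop above gives the engine up to 1000 * 100us == 100ms to
 * drop EDMA_EN before we give up and report -EIO.
 */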
862
863 static int mv_stop_dma(struct ata_port *ap)
864 {
865 unsigned long flags;
866 int rc;
867
868 spin_lock_irqsave(&ap->host->lock, flags);
869 rc = __mv_stop_dma(ap);
870 spin_unlock_irqrestore(&ap->host->lock, flags);
871
872 return rc;
873 }
874
875 #ifdef ATA_DEBUG
876 static void mv_dump_mem(void __iomem *start, unsigned bytes)
877 {
878 int b, w;
879 for (b = 0; b < bytes; ) {
880 DPRINTK("%p: ", start + b);
881 for (w = 0; b < bytes && w < 4; w++) {
882 printk("%08x ", readl(start + b));
883 b += sizeof(u32);
884 }
885 printk("\n");
886 }
887 }
888 #endif
889
890 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
891 {
892 #ifdef ATA_DEBUG
893 int b, w;
894 u32 dw;
895 for (b = 0; b < bytes; ) {
896 DPRINTK("%02x: ", b);
897 for (w = 0; b < bytes && w < 4; w++) {
898 (void) pci_read_config_dword(pdev, b, &dw);
899 printk("%08x ", dw);
900 b += sizeof(u32);
901 }
902 printk("\n");
903 }
904 #endif
905 }
906 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
907 struct pci_dev *pdev)
908 {
909 #ifdef ATA_DEBUG
910 void __iomem *hc_base = mv_hc_base(mmio_base,
911 port >> MV_PORT_HC_SHIFT);
912 void __iomem *port_base;
913 int start_port, num_ports, p, start_hc, num_hcs, hc;
914
915 if (0 > port) {
916 start_hc = start_port = 0;
917 num_ports = 8; /* should be benign for 4-port devs */
918 num_hcs = 2;
919 } else {
920 start_hc = port >> MV_PORT_HC_SHIFT;
921 start_port = port;
922 num_ports = num_hcs = 1;
923 }
924 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
925 num_ports > 1 ? num_ports - 1 : start_port);
926
927 if (NULL != pdev) {
928 DPRINTK("PCI config space regs:\n");
929 mv_dump_pci_cfg(pdev, 0x68);
930 }
931 DPRINTK("PCI regs:\n");
932 mv_dump_mem(mmio_base+0xc00, 0x3c);
933 mv_dump_mem(mmio_base+0xd00, 0x34);
934 mv_dump_mem(mmio_base+0xf00, 0x4);
935 mv_dump_mem(mmio_base+0x1d00, 0x6c);
936 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
937 hc_base = mv_hc_base(mmio_base, hc);
938 DPRINTK("HC regs (HC %i):\n", hc);
939 mv_dump_mem(hc_base, 0x1c);
940 }
941 for (p = start_port; p < start_port + num_ports; p++) {
942 port_base = mv_port_base(mmio_base, p);
943 DPRINTK("EDMA regs (port %i):\n", p);
944 mv_dump_mem(port_base, 0x54);
945 DPRINTK("SATA regs (port %i):\n", p);
946 mv_dump_mem(port_base+0x300, 0x60);
947 }
948 #endif
949 }
950
951 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
952 {
953 unsigned int ofs;
954
955 switch (sc_reg_in) {
956 case SCR_STATUS:
957 case SCR_CONTROL:
958 case SCR_ERROR:
959 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
960 break;
961 case SCR_ACTIVE:
962 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
963 break;
964 default:
965 ofs = 0xffffffffU;
966 break;
967 }
968 return ofs;
969 }
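/*
 * Example of the mapping above: SCR_STATUS (index 0) resolves to
 * SATA_STATUS_OFS (0x300), and the control/error registers land in the
 * following 32-bit slots at 0x300 + 4 * index, matching the "ctrl, err regs
 * follow status" note at SATA_STATUS_OFS.  SCR_ACTIVE is the odd one out and
 * is read from 0x350 instead.
 */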
970
971 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
972 {
973 unsigned int ofs = mv_scr_offset(sc_reg_in);
974
975 if (ofs != 0xffffffffU) {
976 *val = readl(mv_ap_base(ap) + ofs);
977 return 0;
978 } else
979 return -EINVAL;
980 }
981
982 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
983 {
984 unsigned int ofs = mv_scr_offset(sc_reg_in);
985
986 if (ofs != 0xffffffffU) {
987 writelfl(val, mv_ap_base(ap) + ofs);
988 return 0;
989 } else
990 return -EINVAL;
991 }
992
993 static void mv6_dev_config(struct ata_device *adev)
994 {
995 /*
996 * We don't have hob_nsect when doing NCQ commands on Gen-II.
997 * See mv_qc_prep() for more info.
998 */
999 if (adev->flags & ATA_DFLAG_NCQ)
1000 if (adev->max_sectors > ATA_MAX_SECTORS)
1001 adev->max_sectors = ATA_MAX_SECTORS;
1002 }
1003
1004 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1005 void __iomem *port_mmio, int want_ncq)
1006 {
1007 u32 cfg;
1008
1009 /* set up non-NCQ EDMA configuration */
1010 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1011
1012 if (IS_GEN_I(hpriv))
1013 cfg |= (1 << 8); /* enab config burst size mask */
1014
1015 else if (IS_GEN_II(hpriv))
1016 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1017
1018 else if (IS_GEN_IIE(hpriv)) {
1019 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1020 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1021 cfg |= (1 << 18); /* enab early completion */
1022 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1023 }
1024
1025 if (want_ncq) {
1026 cfg |= EDMA_CFG_NCQ;
1027 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1028 } else
1029 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1030
1031 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1032 }
1033
1034 static void mv_port_free_dma_mem(struct ata_port *ap)
1035 {
1036 struct mv_host_priv *hpriv = ap->host->private_data;
1037 struct mv_port_priv *pp = ap->private_data;
1038 int tag;
1039
1040 if (pp->crqb) {
1041 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1042 pp->crqb = NULL;
1043 }
1044 if (pp->crpb) {
1045 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1046 pp->crpb = NULL;
1047 }
1048 /*
1049 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1050 * For later hardware, we have one unique sg_tbl per NCQ tag.
1051 */
1052 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1053 if (pp->sg_tbl[tag]) {
1054 if (tag == 0 || !IS_GEN_I(hpriv))
1055 dma_pool_free(hpriv->sg_tbl_pool,
1056 pp->sg_tbl[tag],
1057 pp->sg_tbl_dma[tag]);
1058 pp->sg_tbl[tag] = NULL;
1059 }
1060 }
1061 }
1062
1063 /**
1064 * mv_port_start - Port specific init/start routine.
1065 * @ap: ATA channel to manipulate
1066 *
1067 * Allocate and point to DMA memory, init port private memory,
1068 * zero indices.
1069 *
1070 * LOCKING:
1071 * Inherited from caller.
1072 */
1073 static int mv_port_start(struct ata_port *ap)
1074 {
1075 struct device *dev = ap->host->dev;
1076 struct mv_host_priv *hpriv = ap->host->private_data;
1077 struct mv_port_priv *pp;
1078 void __iomem *port_mmio = mv_ap_base(ap);
1079 unsigned long flags;
1080 int tag;
1081
1082 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1083 if (!pp)
1084 return -ENOMEM;
1085 ap->private_data = pp;
1086
1087 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1088 if (!pp->crqb)
1089 return -ENOMEM;
1090 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1091
1092 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1093 if (!pp->crpb)
1094 goto out_port_free_dma_mem;
1095 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1096
1097 /*
1098 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1099 * For later hardware, we need one unique sg_tbl per NCQ tag.
1100 */
1101 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1102 if (tag == 0 || !IS_GEN_I(hpriv)) {
1103 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1104 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1105 if (!pp->sg_tbl[tag])
1106 goto out_port_free_dma_mem;
1107 } else {
1108 pp->sg_tbl[tag] = pp->sg_tbl[0];
1109 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1110 }
1111 }
1112
1113 spin_lock_irqsave(&ap->host->lock, flags);
1114
1115 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1116 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1117
1118 spin_unlock_irqrestore(&ap->host->lock, flags);
1119
1120 /* Don't turn on EDMA here; do it just before issuing DMA commands. Else
1121 * we'd be unable to send non-data, PIO, etc. commands due to restricted
1122 * access to the shadow regs.
1123 */
1124 return 0;
1125
1126 out_port_free_dma_mem:
1127 mv_port_free_dma_mem(ap);
1128 return -ENOMEM;
1129 }
1130
1131 /**
1132 * mv_port_stop - Port specific cleanup/stop routine.
1133 * @ap: ATA channel to manipulate
1134 *
1135 * Stop DMA, cleanup port memory.
1136 *
1137 * LOCKING:
1138 * This routine uses the host lock to protect the DMA stop.
1139 */
1140 static void mv_port_stop(struct ata_port *ap)
1141 {
1142 mv_stop_dma(ap);
1143 mv_port_free_dma_mem(ap);
1144 }
1145
1146 /**
1147 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1148 * @qc: queued command whose SG list to source from
1149 *
1150 * Populate the SG list and mark the last entry.
1151 *
1152 * LOCKING:
1153 * Inherited from caller.
1154 */
1155 static void mv_fill_sg(struct ata_queued_cmd *qc)
1156 {
1157 struct mv_port_priv *pp = qc->ap->private_data;
1158 struct scatterlist *sg;
1159 struct mv_sg *mv_sg, *last_sg = NULL;
1160 unsigned int si;
1161
1162 mv_sg = pp->sg_tbl[qc->tag];
1163 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1164 dma_addr_t addr = sg_dma_address(sg);
1165 u32 sg_len = sg_dma_len(sg);
1166
1167 while (sg_len) {
1168 u32 offset = addr & 0xffff;
1169 u32 len = sg_len;
1170
1171 if ((offset + sg_len > 0x10000))
1172 len = 0x10000 - offset;
1173
1174 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1175 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1176 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1177
1178 sg_len -= len;
1179 addr += len;
1180
1181 last_sg = mv_sg;
1182 mv_sg++;
1183 }
1184 }
1185
1186 if (likely(last_sg))
1187 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1188 }
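/*
 * Worked example of the 64K handling in mv_fill_sg(): a 12KB segment at DMA
 * address 0x1f000 sits 0xf000 bytes into its 64K window, so the first ePRD
 * entry covers 0x10000 - 0xf000 == 4KB and a second entry picks up the
 * remaining 8KB starting at 0x20000.  This doubling is why .sg_tablesize is
 * halved relative to MV_MAX_SG_CT.
 */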
1189
1190 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1191 {
1192 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1193 (last ? CRQB_CMD_LAST : 0);
1194 *cmdw = cpu_to_le16(tmp);
1195 }
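/*
 * Shape of one packed CRQB command word, per the helper above: the data byte
 * sits in bits 7:0, the shadow-register address is placed at
 * CRQB_CMD_ADDR_SHIFT (bit 8), CRQB_CMD_CS is always OR'd in, and
 * CRQB_CMD_LAST flags the final word of the request -- which is why
 * mv_qc_prep() passes last == 1 only for the ATA_REG_CMD entry.
 */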
1196
1197 /**
1198 * mv_qc_prep - Host specific command preparation.
1199 * @qc: queued command to prepare
1200 *
1201 * This routine returns early if the command is not using EDMA
1202 * (DMA or NCQ). Else, it handles prep of the CRQB
1203 * (command request block), does some sanity checking, and calls
1204 * the SG load routine.
1205 *
1206 * LOCKING:
1207 * Inherited from caller.
1208 */
1209 static void mv_qc_prep(struct ata_queued_cmd *qc)
1210 {
1211 struct ata_port *ap = qc->ap;
1212 struct mv_port_priv *pp = ap->private_data;
1213 __le16 *cw;
1214 struct ata_taskfile *tf;
1215 u16 flags = 0;
1216 unsigned in_index;
1217
1218 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1219 (qc->tf.protocol != ATA_PROT_NCQ))
1220 return;
1221
1222 /* Fill in command request block
1223 */
1224 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1225 flags |= CRQB_FLAG_READ;
1226 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1227 flags |= qc->tag << CRQB_TAG_SHIFT;
1228
1229 /* get current queue index from software */
1230 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1231
1232 pp->crqb[in_index].sg_addr =
1233 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1234 pp->crqb[in_index].sg_addr_hi =
1235 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1236 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1237
1238 cw = &pp->crqb[in_index].ata_cmd[0];
1239 tf = &qc->tf;
1240
1241 /* Sadly, the CRQB cannot accommodate all registers--there are
1242 * only 11 bytes...so we must pick and choose required
1243 * registers based on the command. So, we drop feature and
1244 * hob_feature for [RW] DMA commands, but they are needed for
1245 * NCQ. NCQ will drop hob_nsect.
1246 */
1247 switch (tf->command) {
1248 case ATA_CMD_READ:
1249 case ATA_CMD_READ_EXT:
1250 case ATA_CMD_WRITE:
1251 case ATA_CMD_WRITE_EXT:
1252 case ATA_CMD_WRITE_FUA_EXT:
1253 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1254 break;
1255 case ATA_CMD_FPDMA_READ:
1256 case ATA_CMD_FPDMA_WRITE:
1257 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1258 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1259 break;
1260 default:
1261 /* The only other commands EDMA supports in non-queued and
1262 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1263 * of which are defined/used by Linux. If we get here, this
1264 * driver needs work.
1265 *
1266 * FIXME: modify libata to give qc_prep a return value and
1267 * return error here.
1268 */
1269 BUG_ON(tf->command);
1270 break;
1271 }
1272 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1273 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1274 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1275 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1276 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1277 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1278 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1279 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1280 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1281
1282 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1283 return;
1284 mv_fill_sg(qc);
1285 }
1286
1287 /**
1288 * mv_qc_prep_iie - Host specific command preparation.
1289 * @qc: queued command to prepare
1290 *
1291 * This routine returns early if the command is not using EDMA
1292 * (DMA or NCQ). Else, it handles prep of the CRQB
1293 * (command request block), does some sanity checking, and calls
1294 * the SG load routine.
1295 *
1296 * LOCKING:
1297 * Inherited from caller.
1298 */
1299 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1300 {
1301 struct ata_port *ap = qc->ap;
1302 struct mv_port_priv *pp = ap->private_data;
1303 struct mv_crqb_iie *crqb;
1304 struct ata_taskfile *tf;
1305 unsigned in_index;
1306 u32 flags = 0;
1307
1308 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1309 (qc->tf.protocol != ATA_PROT_NCQ))
1310 return;
1311
1312 /* Fill in Gen IIE command request block
1313 */
1314 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1315 flags |= CRQB_FLAG_READ;
1316
1317 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1318 flags |= qc->tag << CRQB_TAG_SHIFT;
1319 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1320
1321 /* get current queue index from software */
1322 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1323
1324 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1325 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1326 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1327 crqb->flags = cpu_to_le32(flags);
1328
1329 tf = &qc->tf;
1330 crqb->ata_cmd[0] = cpu_to_le32(
1331 (tf->command << 16) |
1332 (tf->feature << 24)
1333 );
1334 crqb->ata_cmd[1] = cpu_to_le32(
1335 (tf->lbal << 0) |
1336 (tf->lbam << 8) |
1337 (tf->lbah << 16) |
1338 (tf->device << 24)
1339 );
1340 crqb->ata_cmd[2] = cpu_to_le32(
1341 (tf->hob_lbal << 0) |
1342 (tf->hob_lbam << 8) |
1343 (tf->hob_lbah << 16) |
1344 (tf->hob_feature << 24)
1345 );
1346 crqb->ata_cmd[3] = cpu_to_le32(
1347 (tf->nsect << 0) |
1348 (tf->hob_nsect << 8)
1349 );
1350
1351 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1352 return;
1353 mv_fill_sg(qc);
1354 }
1355
1356 /**
1357 * mv_qc_issue - Initiate a command to the host
1358 * @qc: queued command to start
1359 *
1360 * This routine simply redirects to the general purpose routine
1361 * if command is not DMA. Else, it sanity checks our local
1362 * caches of the request producer/consumer indices then enables
1363 * DMA and bumps the request producer index.
1364 *
1365 * LOCKING:
1366 * Inherited from caller.
1367 */
1368 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1369 {
1370 struct ata_port *ap = qc->ap;
1371 void __iomem *port_mmio = mv_ap_base(ap);
1372 struct mv_port_priv *pp = ap->private_data;
1373 u32 in_index;
1374
1375 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1376 (qc->tf.protocol != ATA_PROT_NCQ)) {
1377 /* We're about to send a non-EDMA capable command to the
1378 * port. Turn off EDMA so there won't be problems accessing
1379 * the shadow block and other registers.
1380 */
1381 __mv_stop_dma(ap);
1382 return ata_qc_issue_prot(qc);
1383 }
1384
1385 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1386
1387 pp->req_idx++;
1388
1389 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1390
1391 /* and write the request in pointer to kick the EDMA to life */
1392 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1393 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1394
1395 return 0;
1396 }
1397
1398 /**
1399 * mv_err_intr - Handle error interrupts on the port
1400 * @ap: ATA channel to manipulate
1401 * @qc: affected command (may be NULL)
1402 *
1403 * In most cases, just clear the interrupt and move on. However,
1404 * some cases require an eDMA reset, which is done right before
1405 * the COMRESET in mv_phy_reset(). The SERR case requires a
1406 * clear of pending errors in the SATA SERROR register. Finally,
1407 * if the port disabled DMA, update our cached copy to match.
1408 *
1409 * LOCKING:
1410 * Inherited from caller.
1411 */
1412 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1413 {
1414 void __iomem *port_mmio = mv_ap_base(ap);
1415 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1416 struct mv_port_priv *pp = ap->private_data;
1417 struct mv_host_priv *hpriv = ap->host->private_data;
1418 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1419 unsigned int action = 0, err_mask = 0;
1420 struct ata_eh_info *ehi = &ap->link.eh_info;
1421
1422 ata_ehi_clear_desc(ehi);
1423
1424 if (!edma_enabled) {
1425 /* just a guess: do we need to do this? should we
1426 * expand this, and do it in all cases?
1427 */
1428 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1429 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1430 }
1431
1432 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1433
1434 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1435
1436 /*
1437 * all generations share these EDMA error cause bits
1438 */
1439
1440 if (edma_err_cause & EDMA_ERR_DEV)
1441 err_mask |= AC_ERR_DEV;
1442 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1443 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1444 EDMA_ERR_INTRL_PAR)) {
1445 err_mask |= AC_ERR_ATA_BUS;
1446 action |= ATA_EH_RESET;
1447 ata_ehi_push_desc(ehi, "parity error");
1448 }
1449 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1450 ata_ehi_hotplugged(ehi);
1451 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1452 "dev disconnect" : "dev connect");
1453 action |= ATA_EH_RESET;
1454 }
1455
1456 if (IS_GEN_I(hpriv)) {
1457 eh_freeze_mask = EDMA_EH_FREEZE_5;
1458
1459 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1460 pp = ap->private_data;
1461 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1462 ata_ehi_push_desc(ehi, "EDMA self-disable");
1463 }
1464 } else {
1465 eh_freeze_mask = EDMA_EH_FREEZE;
1466
1467 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1468 pp = ap->private_data;
1469 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1470 ata_ehi_push_desc(ehi, "EDMA self-disable");
1471 }
1472
1473 if (edma_err_cause & EDMA_ERR_SERR) {
1474 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1475 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1476 err_mask = AC_ERR_ATA_BUS;
1477 action |= ATA_EH_RESET;
1478 }
1479 }
1480
1481 /* Clear EDMA now that SERR cleanup done */
1482 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1483
1484 if (!err_mask) {
1485 err_mask = AC_ERR_OTHER;
1486 action |= ATA_EH_RESET;
1487 }
1488
1489 ehi->serror |= serr;
1490 ehi->action |= action;
1491
1492 if (qc)
1493 qc->err_mask |= err_mask;
1494 else
1495 ehi->err_mask |= err_mask;
1496
1497 if (edma_err_cause & eh_freeze_mask)
1498 ata_port_freeze(ap);
1499 else
1500 ata_port_abort(ap);
1501 }
1502
1503 static void mv_intr_pio(struct ata_port *ap)
1504 {
1505 struct ata_queued_cmd *qc;
1506 u8 ata_status;
1507
1508 /* ignore spurious intr if drive still BUSY */
1509 ata_status = readb(ap->ioaddr.status_addr);
1510 if (unlikely(ata_status & ATA_BUSY))
1511 return;
1512
1513 /* get active ATA command */
1514 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1515 if (unlikely(!qc)) /* no active tag */
1516 return;
1517 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1518 return;
1519
1520 /* and finally, complete the ATA command */
1521 qc->err_mask |= ac_err_mask(ata_status);
1522 ata_qc_complete(qc);
1523 }
1524
1525 static void mv_intr_edma(struct ata_port *ap)
1526 {
1527 void __iomem *port_mmio = mv_ap_base(ap);
1528 struct mv_host_priv *hpriv = ap->host->private_data;
1529 struct mv_port_priv *pp = ap->private_data;
1530 struct ata_queued_cmd *qc;
1531 u32 out_index, in_index;
1532 bool work_done = false;
1533
1534 /* get h/w response queue pointer */
1535 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1536 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1537
1538 while (1) {
1539 u16 status;
1540 unsigned int tag;
1541
1542 /* get s/w response queue last-read pointer, and compare */
1543 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1544 if (in_index == out_index)
1545 break;
1546
1547 /* 50xx: get active ATA command */
1548 if (IS_GEN_I(hpriv))
1549 tag = ap->link.active_tag;
1550
1551 /* Gen II/IIE: get active ATA command via tag, to enable
1552 * support for queueing. this works transparently for
1553 * queued and non-queued modes.
1554 */
1555 else
1556 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1557
1558 qc = ata_qc_from_tag(ap, tag);
1559
1560 /* For non-NCQ mode, the lower 8 bits of status
1561 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1562 * which should be zero if all went well.
1563 */
1564 status = le16_to_cpu(pp->crpb[out_index].flags);
1565 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1566 mv_err_intr(ap, qc);
1567 return;
1568 }
1569
1570 /* and finally, complete the ATA command */
1571 if (qc) {
1572 qc->err_mask |=
1573 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1574 ata_qc_complete(qc);
1575 }
1576
1577 /* advance software response queue pointer, to
1578 * indicate (after the loop completes) to hardware
1579 * that we have consumed a response queue entry.
1580 */
1581 work_done = true;
1582 pp->resp_idx++;
1583 }
1584
1585 if (work_done)
1586 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1587 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1588 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1589 }
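/*
 * Note on the response parsing above: for Gen-II/IIE the low five bits of the
 * CRPB id field carry the command tag (the 0x1f mask matches
 * MV_MAX_Q_DEPTH - 1), which is what makes completion reporting work for both
 * queued and non-queued commands, while Gen-I has no NCQ and always completes
 * the single active_tag command.
 */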
1590
1591 /**
1592 * mv_host_intr - Handle all interrupts on the given host controller
1593 * @host: host specific structure
1594 * @relevant: port error bits relevant to this host controller
1595 * @hc: which host controller we're to look at
1596 *
1597 * Read then write clear the HC interrupt status then walk each
1598 * port connected to the HC and see if it needs servicing. Port
1599 * success ints are reported in the HC interrupt status reg, the
1600 * port error ints are reported in the higher level main
1601 * interrupt status register and thus are passed in via the
1602 * 'relevant' argument.
1603 *
1604 * LOCKING:
1605 * Inherited from caller.
1606 */
1607 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1608 {
1609 struct mv_host_priv *hpriv = host->private_data;
1610 void __iomem *mmio = hpriv->base;
1611 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1612 u32 hc_irq_cause;
1613 int port, port0, last_port;
1614
1615 if (hc == 0)
1616 port0 = 0;
1617 else
1618 port0 = MV_PORTS_PER_HC;
1619
1620 if (HAS_PCI(host))
1621 last_port = port0 + MV_PORTS_PER_HC;
1622 else
1623 last_port = port0 + hpriv->n_ports;
1624 /* we'll need the HC success int register in most cases */
1625 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1626 if (!hc_irq_cause)
1627 return;
1628
1629 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1630
1631 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1632 hc, relevant, hc_irq_cause);
1633
1634 for (port = port0; port < last_port; port++) {
1635 struct ata_port *ap = host->ports[port];
1636 struct mv_port_priv *pp;
1637 int have_err_bits, hard_port, shift;
1638
1639 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1640 continue;
1641
1642 pp = ap->private_data;
1643
1644 shift = port << 1; /* (port * 2) */
1645 if (port >= MV_PORTS_PER_HC) {
1646 shift++; /* skip bit 8 in the HC Main IRQ reg */
1647 }
1648 have_err_bits = ((PORT0_ERR << shift) & relevant);
1649
1650 if (unlikely(have_err_bits)) {
1651 struct ata_queued_cmd *qc;
1652
1653 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1654 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1655 continue;
1656
1657 mv_err_intr(ap, qc);
1658 continue;
1659 }
1660
1661 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1662
1663 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1664 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1665 mv_intr_edma(ap);
1666 } else {
1667 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1668 mv_intr_pio(ap);
1669 }
1670 }
1671 VPRINTK("EXIT\n");
1672 }
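/*
 * Example of the error-bit shift computed in mv_host_intr(): each port owns
 * an adjacent PORT0_ERR/PORT0_DONE bit pair in the main cause register, so
 * successive ports sit two bits apart: port 1 uses shift 2, port 2 shift 4.
 * Ports on the second host controller additionally skip the HC0 coalescing
 * bit (bit 8), so port 4 lands at shift 9 and port 5 at shift 11, inside the
 * bits 9-17 window that HC_SHIFT reserves for HC1.
 */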
1673
1674 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1675 {
1676 struct mv_host_priv *hpriv = host->private_data;
1677 struct ata_port *ap;
1678 struct ata_queued_cmd *qc;
1679 struct ata_eh_info *ehi;
1680 unsigned int i, err_mask, printed = 0;
1681 u32 err_cause;
1682
1683 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1684
1685 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1686 err_cause);
1687
1688 DPRINTK("All regs @ PCI error\n");
1689 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1690
1691 writelfl(0, mmio + hpriv->irq_cause_ofs);
1692
1693 for (i = 0; i < host->n_ports; i++) {
1694 ap = host->ports[i];
1695 if (!ata_link_offline(&ap->link)) {
1696 ehi = &ap->link.eh_info;
1697 ata_ehi_clear_desc(ehi);
1698 if (!printed++)
1699 ata_ehi_push_desc(ehi,
1700 "PCI err cause 0x%08x", err_cause);
1701 err_mask = AC_ERR_HOST_BUS;
1702 ehi->action = ATA_EH_RESET;
1703 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1704 if (qc)
1705 qc->err_mask |= err_mask;
1706 else
1707 ehi->err_mask |= err_mask;
1708
1709 ata_port_freeze(ap);
1710 }
1711 }
1712 }
1713
1714 /**
1715 * mv_interrupt - Main interrupt event handler
1716 * @irq: unused
1717 * @dev_instance: private data; in this case the host structure
1718 *
1719 * Read the read only register to determine if any host
1720 * controllers have pending interrupts. If so, call lower level
1721 * routine to handle. Also check for PCI errors which are only
1722 * reported here.
1723 *
1724 * LOCKING:
1725 * This routine holds the host lock while processing pending
1726 * interrupts.
1727 */
1728 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1729 {
1730 struct ata_host *host = dev_instance;
1731 struct mv_host_priv *hpriv = host->private_data;
1732 unsigned int hc, handled = 0, n_hcs;
1733 void __iomem *mmio = hpriv->base;
1734 u32 irq_stat, irq_mask;
1735
1736 spin_lock(&host->lock);
1737
1738 irq_stat = readl(hpriv->main_cause_reg_addr);
1739 irq_mask = readl(hpriv->main_mask_reg_addr);
1740
1741 /* check the cases where we either have nothing pending or have read
1742 * a bogus register value which can indicate HW removal or PCI fault
1743 */
1744 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1745 goto out_unlock;
1746
1747 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1748
1749 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1750 mv_pci_error(host, mmio);
1751 handled = 1;
1752 goto out_unlock; /* skip all other HC irq handling */
1753 }
1754
1755 for (hc = 0; hc < n_hcs; hc++) {
1756 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1757 if (relevant) {
1758 mv_host_intr(host, relevant, hc);
1759 handled = 1;
1760 }
1761 }
1762
1763 out_unlock:
1764 spin_unlock(&host->lock);
1765
1766 return IRQ_RETVAL(handled);
1767 }
1768
1769 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1770 {
1771 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1772 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1773
1774 return hc_mmio + ofs;
1775 }
1776
1777 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1778 {
1779 unsigned int ofs;
1780
1781 switch (sc_reg_in) {
1782 case SCR_STATUS:
1783 case SCR_ERROR:
1784 case SCR_CONTROL:
1785 ofs = sc_reg_in * sizeof(u32);
1786 break;
1787 default:
1788 ofs = 0xffffffffU;
1789 break;
1790 }
1791 return ofs;
1792 }
1793
1794 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1795 {
1796 struct mv_host_priv *hpriv = ap->host->private_data;
1797 void __iomem *mmio = hpriv->base;
1798 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1799 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1800
1801 if (ofs != 0xffffffffU) {
1802 *val = readl(addr + ofs);
1803 return 0;
1804 } else
1805 return -EINVAL;
1806 }
1807
1808 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1809 {
1810 struct mv_host_priv *hpriv = ap->host->private_data;
1811 void __iomem *mmio = hpriv->base;
1812 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1813 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1814
1815 if (ofs != 0xffffffffU) {
1816 writelfl(val, addr + ofs);
1817 return 0;
1818 } else
1819 return -EINVAL;
1820 }
1821
1822 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1823 {
1824 struct pci_dev *pdev = to_pci_dev(host->dev);
1825 int early_5080;
1826
1827 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1828
1829 if (!early_5080) {
1830 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1831 tmp |= (1 << 0);
1832 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1833 }
1834
1835 mv_reset_pci_bus(host, mmio);
1836 }
1837
1838 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1839 {
1840 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1841 }
1842
1843 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1844 void __iomem *mmio)
1845 {
1846 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1847 u32 tmp;
1848
1849 tmp = readl(phy_mmio + MV5_PHY_MODE);
1850
1851 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1852 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1853 }
1854
1855 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1856 {
1857 u32 tmp;
1858
1859 writel(0, mmio + MV_GPIO_PORT_CTL);
1860
1861 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1862
1863 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1864 tmp |= ~(1 << 0);
1865 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1866 }
1867
1868 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1869 unsigned int port)
1870 {
1871 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1872 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1873 u32 tmp;
1874 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1875
1876 if (fix_apm_sq) {
1877 tmp = readl(phy_mmio + MV5_LT_MODE);
1878 tmp |= (1 << 19);
1879 writel(tmp, phy_mmio + MV5_LT_MODE);
1880
1881 tmp = readl(phy_mmio + MV5_PHY_CTL);
1882 tmp &= ~0x3;
1883 tmp |= 0x1;
1884 writel(tmp, phy_mmio + MV5_PHY_CTL);
1885 }
1886
1887 tmp = readl(phy_mmio + MV5_PHY_MODE);
1888 tmp &= ~mask;
1889 tmp |= hpriv->signal[port].pre;
1890 tmp |= hpriv->signal[port].amps;
1891 writel(tmp, phy_mmio + MV5_PHY_MODE);
1892 }
1893
1894
1895 #undef ZERO
1896 #define ZERO(reg) writel(0, port_mmio + (reg))
1897 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1898 unsigned int port)
1899 {
1900 void __iomem *port_mmio = mv_port_base(mmio, port);
1901
1902 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1903
1904 mv_channel_reset(hpriv, mmio, port);
1905
1906 ZERO(0x028); /* command */
1907 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1908 ZERO(0x004); /* timer */
1909 ZERO(0x008); /* irq err cause */
1910 ZERO(0x00c); /* irq err mask */
1911 ZERO(0x010); /* rq bah */
1912 ZERO(0x014); /* rq inp */
1913 ZERO(0x018); /* rq outp */
1914 ZERO(0x01c); /* respq bah */
1915 ZERO(0x024); /* respq outp */
1916 ZERO(0x020); /* respq inp */
1917 ZERO(0x02c); /* test control */
1918 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1919 }
1920 #undef ZERO
1921
1922 #define ZERO(reg) writel(0, hc_mmio + (reg))
1923 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1924 unsigned int hc)
1925 {
1926 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1927 u32 tmp;
1928
1929 ZERO(0x00c);
1930 ZERO(0x010);
1931 ZERO(0x014);
1932 ZERO(0x018);
1933
1934 tmp = readl(hc_mmio + 0x20);
1935 tmp &= 0x1c1c1c1c;
1936 tmp |= 0x03030303;
1937 writel(tmp, hc_mmio + 0x20);
1938 }
1939 #undef ZERO
1940
1941 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1942 unsigned int n_hc)
1943 {
1944 unsigned int hc, port;
1945
1946 for (hc = 0; hc < n_hc; hc++) {
1947 for (port = 0; port < MV_PORTS_PER_HC; port++)
1948 mv5_reset_hc_port(hpriv, mmio,
1949 (hc * MV_PORTS_PER_HC) + port);
1950
1951 mv5_reset_one_hc(hpriv, mmio, hc);
1952 }
1953
1954 return 0;
1955 }
1956
1957 #undef ZERO
1958 #define ZERO(reg) writel(0, mmio + (reg))
1959 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
1960 {
1961 struct mv_host_priv *hpriv = host->private_data;
1962 u32 tmp;
1963
1964 tmp = readl(mmio + MV_PCI_MODE);
1965 tmp &= 0xff00ffff;
1966 writel(tmp, mmio + MV_PCI_MODE);
1967
1968 ZERO(MV_PCI_DISC_TIMER);
1969 ZERO(MV_PCI_MSI_TRIGGER);
1970 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1971 ZERO(HC_MAIN_IRQ_MASK_OFS);
1972 ZERO(MV_PCI_SERR_MASK);
1973 ZERO(hpriv->irq_cause_ofs);
1974 ZERO(hpriv->irq_mask_ofs);
1975 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1976 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1977 ZERO(MV_PCI_ERR_ATTRIBUTE);
1978 ZERO(MV_PCI_ERR_COMMAND);
1979 }
1980 #undef ZERO
1981
1982 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1983 {
1984 u32 tmp;
1985
1986 mv5_reset_flash(hpriv, mmio);
1987
1988 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1989 tmp &= 0x3;
1990 tmp |= (1 << 5) | (1 << 6);
1991 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1992 }
1993
1994 /**
1995 * mv6_reset_hc - Perform the 6xxx global soft reset
1996 * @mmio: base address of the HBA
1997 *
1998 * This routine only applies to 6xxx parts.
1999 *
2000 * LOCKING:
2001 * Inherited from caller.
2002 */
2003 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2004 unsigned int n_hc)
2005 {
2006 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2007 int i, rc = 0;
2008 u32 t;
2009
2010 /* Following procedure defined in PCI "main command and status
2011 * register" table.
2012 */
2013 t = readl(reg);
2014 writel(t | STOP_PCI_MASTER, reg);
2015
2016 for (i = 0; i < 1000; i++) {
2017 udelay(1);
2018 t = readl(reg);
2019 if (PCI_MASTER_EMPTY & t)
2020 break;
2021 }
2022 if (!(PCI_MASTER_EMPTY & t)) {
2023 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2024 rc = 1;
2025 goto done;
2026 }
2027
2028 /* set reset */
2029 i = 5;
2030 do {
2031 writel(t | GLOB_SFT_RST, reg);
2032 t = readl(reg);
2033 udelay(1);
2034 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2035
2036 if (!(GLOB_SFT_RST & t)) {
2037 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2038 rc = 1;
2039 goto done;
2040 }
2041
2042 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2043 i = 5;
2044 do {
2045 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2046 t = readl(reg);
2047 udelay(1);
2048 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2049
2050 if (GLOB_SFT_RST & t) {
2051 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2052 rc = 1;
2053 }
2054 done:
2055 return rc;
2056 }
2057
2058 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2059 void __iomem *mmio)
2060 {
2061 void __iomem *port_mmio;
2062 u32 tmp;
2063
2064 tmp = readl(mmio + MV_RESET_CFG);
2065 if ((tmp & (1 << 0)) == 0) {
2066 hpriv->signal[idx].amps = 0x7 << 8;
2067 hpriv->signal[idx].pre = 0x1 << 5;
2068 return;
2069 }
2070
2071 port_mmio = mv_port_base(mmio, idx);
2072 tmp = readl(port_mmio + PHY_MODE2);
2073
2074 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2075 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2076 }
2077
2078 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2079 {
2080 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2081 }
2082
2083 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2084 unsigned int port)
2085 {
2086 void __iomem *port_mmio = mv_port_base(mmio, port);
2087
2088 u32 hp_flags = hpriv->hp_flags;
2089 int fix_phy_mode2 =
2090 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2091 int fix_phy_mode4 =
2092 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2093 u32 m2, tmp;
2094
2095 if (fix_phy_mode2) {
2096 m2 = readl(port_mmio + PHY_MODE2);
2097 m2 &= ~(1 << 16);
2098 m2 |= (1 << 31);
2099 writel(m2, port_mmio + PHY_MODE2);
2100
2101 udelay(200);
2102
2103 m2 = readl(port_mmio + PHY_MODE2);
2104 m2 &= ~((1 << 16) | (1 << 31));
2105 writel(m2, port_mmio + PHY_MODE2);
2106
2107 udelay(200);
2108 }
2109
2110 /* who knows what this magic does */
2111 tmp = readl(port_mmio + PHY_MODE3);
2112 tmp &= ~0x7F800000;
2113 tmp |= 0x2A800000;
2114 writel(tmp, port_mmio + PHY_MODE3);
2115
2116 if (fix_phy_mode4) {
2117 u32 m4;
2118
2119 m4 = readl(port_mmio + PHY_MODE4);
2120
2121 if (hp_flags & MV_HP_ERRATA_60X1B2)
2122 tmp = readl(port_mmio + 0x310);
2123
2124 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2125
2126 writel(m4, port_mmio + PHY_MODE4);
2127
2128 if (hp_flags & MV_HP_ERRATA_60X1B2)
2129 writel(tmp, port_mmio + 0x310);
2130 }
2131
2132 /* Revert values of pre-emphasis and signal amps to the saved ones */
2133 m2 = readl(port_mmio + PHY_MODE2);
2134
2135 m2 &= ~MV_M2_PREAMP_MASK;
2136 m2 |= hpriv->signal[port].amps;
2137 m2 |= hpriv->signal[port].pre;
2138 m2 &= ~(1 << 16);
2139
2140 /* according to mvSata 3.6.1, some IIE values are fixed */
2141 if (IS_GEN_IIE(hpriv)) {
2142 m2 &= ~0xC30FF01F;
2143 m2 |= 0x0000900F;
2144 }
2145
2146 writel(m2, port_mmio + PHY_MODE2);
2147 }
2148
2149 /* TODO: use the generic LED interface to configure the SATA Presence */
2150 /* & Activity LEDs on the board */
2151 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2152 void __iomem *mmio)
2153 {
2154 return;
2155 }
2156
2157 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2158 void __iomem *mmio)
2159 {
2160 void __iomem *port_mmio;
2161 u32 tmp;
2162
2163 port_mmio = mv_port_base(mmio, idx);
2164 tmp = readl(port_mmio + PHY_MODE2);
2165
2166 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2167 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2168 }
2169
2170 #undef ZERO
2171 #define ZERO(reg) writel(0, port_mmio + (reg))
2172 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2173 void __iomem *mmio, unsigned int port)
2174 {
2175 void __iomem *port_mmio = mv_port_base(mmio, port);
2176
2177 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2178
2179 mv_channel_reset(hpriv, mmio, port);
2180
2181 ZERO(0x028); /* command */
2182 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2183 ZERO(0x004); /* timer */
2184 ZERO(0x008); /* irq err cause */
2185 ZERO(0x00c); /* irq err mask */
2186 ZERO(0x010); /* rq bah */
2187 ZERO(0x014); /* rq inp */
2188 ZERO(0x018); /* rq outp */
2189 ZERO(0x01c); /* respq bah */
2190 ZERO(0x024); /* respq outp */
2191 ZERO(0x020); /* respq inp */
2192 ZERO(0x02c); /* test control */
2193 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2194 }
2195
2196 #undef ZERO
2197
2198 #define ZERO(reg) writel(0, hc_mmio + (reg))
2199 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2200 void __iomem *mmio)
2201 {
2202 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2203
2204 ZERO(0x00c);
2205 ZERO(0x010);
2206 ZERO(0x014);
2207
2208 }
2209
2210 #undef ZERO
2211
2212 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2213 void __iomem *mmio, unsigned int n_hc)
2214 {
2215 unsigned int port;
2216
2217 for (port = 0; port < hpriv->n_ports; port++)
2218 mv_soc_reset_hc_port(hpriv, mmio, port);
2219
2220 mv_soc_reset_one_hc(hpriv, mmio);
2221
2222 return 0;
2223 }
2224
2225 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2226 void __iomem *mmio)
2227 {
2228 return;
2229 }
2230
2231 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2232 {
2233 return;
2234 }
2235
2236 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2237 unsigned int port_no)
2238 {
2239 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2240
2241 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2242
2243 if (IS_GEN_II(hpriv)) {
2244 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2245 ifctl |= (1 << 7); /* enable gen2i speed */
2246 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2247 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2248 }
2249
2250 udelay(25); /* allow reset propagation */
2251
2252 /* Spec never mentions clearing the bit. Marvell's driver does
2253 * clear the bit, however.
2254 */
2255 writelfl(0, port_mmio + EDMA_CMD_OFS);
2256
2257 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2258
2259 if (IS_GEN_I(hpriv))
2260 mdelay(1);
2261 }
2262
2263 /**
2264 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2265 * @ap: ATA channel to manipulate
2266 *
2267 * Part of this is taken from __sata_phy_reset and modified to
2268 * not sleep since this routine gets called from interrupt level.
2269 *
2270 * LOCKING:
2271 * Inherited from caller. This is coded to be safe to call at
2272 * interrupt level, i.e. it does not sleep.
2273 */
2274 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2275 unsigned long deadline)
2276 {
2277 struct mv_port_priv *pp = ap->private_data;
2278 struct mv_host_priv *hpriv = ap->host->private_data;
2279 void __iomem *port_mmio = mv_ap_base(ap);
2280 int retry = 5;
2281 u32 sstatus;
2282
2283 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2284
2285 #ifdef DEBUG
2286 {
2287 u32 sstatus, serror, scontrol;
2288
2289 mv_scr_read(ap, SCR_STATUS, &sstatus);
2290 mv_scr_read(ap, SCR_ERROR, &serror);
2291 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2292 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2293 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2294 }
2295 #endif
2296
2297 /* Issue COMRESET via SControl */
2298 comreset_retry:
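/* SControl 0x301 sets DET=1 (start interface initialization, i.e. a
 * COMRESET) with IPM=3 (partial/slumber transitions disabled); writing
 * 0x300 afterwards clears DET so the link can renegotiate.
 */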
2299 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2300 msleep(1);
2301
2302 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2303 msleep(20);
2304
2305 do {
2306 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2307 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2308 break;
2309
2310 msleep(1);
2311 } while (time_before(jiffies, deadline));
2312
2313 /* work around errata */
2314 if (IS_GEN_II(hpriv) &&
2315 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2316 (retry-- > 0))
2317 goto comreset_retry;
2318
2319 #ifdef DEBUG
2320 {
2321 u32 sstatus, serror, scontrol;
2322
2323 mv_scr_read(ap, SCR_STATUS, &sstatus);
2324 mv_scr_read(ap, SCR_ERROR, &serror);
2325 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2326 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2327 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2328 }
2329 #endif
2330
2331 if (ata_link_offline(&ap->link)) {
2332 *class = ATA_DEV_NONE;
2333 return;
2334 }
2335
2336 /* even after SStatus reflects that the device is ready,
2337 * it seems to take a while for the link to be fully
2338 * established (and thus Status no longer 0x80/0x7F),
2339 * so we poll for that here.
2340 */
2341 retry = 20;
2342 while (1) {
2343 u8 drv_stat = ata_check_status(ap);
2344 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2345 break;
2346 msleep(500);
2347 if (retry-- <= 0)
2348 break;
2349 if (time_after(jiffies, deadline))
2350 break;
2351 }
2352
2353 /* FIXME: if we passed the deadline, the following
2354 * code probably produces an invalid result
2355 */
2356
2357 /* finally, read device signature from TF registers */
2358 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2359
2360 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2361
2362 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2363
2364 VPRINTK("EXIT\n");
2365 }
2366
2367 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2368 {
2369 struct ata_port *ap = link->ap;
2370 struct mv_port_priv *pp = ap->private_data;
2371
2372 mv_stop_dma(ap);
2373
2374 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
2375 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2376
2377 return 0;
2378 }
2379
2380 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2381 unsigned long deadline)
2382 {
2383 struct ata_port *ap = link->ap;
2384 struct mv_host_priv *hpriv = ap->host->private_data;
2385 void __iomem *mmio = hpriv->base;
2386
2387 mv_stop_dma(ap);
2388
2389 mv_channel_reset(hpriv, mmio, ap->port_no);
2390
2391 mv_phy_reset(ap, class, deadline);
2392
2393 return 0;
2394 }
2395
2396 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2397 {
2398 struct ata_port *ap = link->ap;
2399 u32 serr;
2400
2401 /* print link status */
2402 sata_print_link_status(link);
2403
2404 /* clear SError */
2405 sata_scr_read(link, SCR_ERROR, &serr);
2406 sata_scr_write_flush(link, SCR_ERROR, serr);
2407
2408 /* bail out if no device is present */
2409 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2410 DPRINTK("EXIT, no device\n");
2411 return;
2412 }
2413
2414 /* set up device control */
2415 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2416 }
2417
2418 static void mv_error_handler(struct ata_port *ap)
2419 {
2420 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2421 mv_hardreset, mv_postreset);
2422 }
2423
2424 static void mv_eh_freeze(struct ata_port *ap)
2425 {
2426 struct mv_host_priv *hpriv = ap->host->private_data;
2427 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2428 u32 tmp, mask;
2429 unsigned int shift;
2430
2431 /* FIXME: handle coalescing completion events properly */
2432
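/* Each port owns two bits (error and done) in the main mask register;
 * ports behind the second host controller (port 4 and up) sit one extra
 * bit higher, hence the adjustment below.
 */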
2433 shift = ap->port_no * 2;
2434 if (hc > 0)
2435 shift++;
2436
2437 mask = 0x3 << shift;
2438
2439 /* disable assertion of portN err, done events */
2440 tmp = readl(hpriv->main_mask_reg_addr);
2441 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2442 }
2443
2444 static void mv_eh_thaw(struct ata_port *ap)
2445 {
2446 struct mv_host_priv *hpriv = ap->host->private_data;
2447 void __iomem *mmio = hpriv->base;
2448 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2449 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2450 void __iomem *port_mmio = mv_ap_base(ap);
2451 u32 tmp, mask, hc_irq_cause;
2452 unsigned int shift, hc_port_no = ap->port_no;
2453
2454 /* FIXME: handle coalescing completion events properly */
2455
2456 shift = ap->port_no * 2;
2457 if (hc > 0) {
2458 shift++;
2459 hc_port_no -= 4;
2460 }
2461
2462 mask = 0x3 << shift;
2463
2464 /* clear EDMA errors on this port */
2465 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2466
2467 /* clear pending irq events */
2468 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2469 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2470 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2471 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2472
2473 /* enable assertion of portN err, done events */
2474 tmp = readl(hpriv->main_mask_reg_addr);
2475 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2476 }
2477
2478 /**
2479 * mv_port_init - Perform some early initialization on a single port.
2480 * @port: libata data structure storing shadow register addresses
2481 * @port_mmio: base address of the port
2482 *
2483 * Initialize shadow register mmio addresses, clear outstanding
2484 * interrupts on the port, and unmask interrupts for the future
2485 * start of the port.
2486 *
2487 * LOCKING:
2488 * Inherited from caller.
2489 */
2490 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2491 {
2492 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2493 unsigned serr_ofs;
2494
2495 /* PIO related setup
2496 */
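/* The taskfile registers are exposed as 32-bit-spaced "shadow" registers,
 * so each libata address is the shadow block base plus the register index
 * scaled by sizeof(u32).
 */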
2497 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2498 port->error_addr =
2499 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2500 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2501 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2502 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2503 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2504 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2505 port->status_addr =
2506 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2507 /* special case: control/altstatus doesn't have ATA_REG_ address */
2508 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2509
2510 /* unused: */
2511 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2512
2513 /* Clear any currently outstanding port interrupt conditions */
2514 serr_ofs = mv_scr_offset(SCR_ERROR);
2515 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2516 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2517
2518 /* unmask all non-transient EDMA error interrupts */
2519 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2520
2521 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2522 readl(port_mmio + EDMA_CFG_OFS),
2523 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2524 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2525 }
2526
2527 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2528 {
2529 struct pci_dev *pdev = to_pci_dev(host->dev);
2530 struct mv_host_priv *hpriv = host->private_data;
2531 u32 hp_flags = hpriv->hp_flags;
2532
2533 switch (board_idx) {
2534 case chip_5080:
2535 hpriv->ops = &mv5xxx_ops;
2536 hp_flags |= MV_HP_GEN_I;
2537
2538 switch (pdev->revision) {
2539 case 0x1:
2540 hp_flags |= MV_HP_ERRATA_50XXB0;
2541 break;
2542 case 0x3:
2543 hp_flags |= MV_HP_ERRATA_50XXB2;
2544 break;
2545 default:
2546 dev_printk(KERN_WARNING, &pdev->dev,
2547 "Applying 50XXB2 workarounds to unknown rev\n");
2548 hp_flags |= MV_HP_ERRATA_50XXB2;
2549 break;
2550 }
2551 break;
2552
2553 case chip_504x:
2554 case chip_508x:
2555 hpriv->ops = &mv5xxx_ops;
2556 hp_flags |= MV_HP_GEN_I;
2557
2558 switch (pdev->revision) {
2559 case 0x0:
2560 hp_flags |= MV_HP_ERRATA_50XXB0;
2561 break;
2562 case 0x3:
2563 hp_flags |= MV_HP_ERRATA_50XXB2;
2564 break;
2565 default:
2566 dev_printk(KERN_WARNING, &pdev->dev,
2567 "Applying B2 workarounds to unknown rev\n");
2568 hp_flags |= MV_HP_ERRATA_50XXB2;
2569 break;
2570 }
2571 break;
2572
2573 case chip_604x:
2574 case chip_608x:
2575 hpriv->ops = &mv6xxx_ops;
2576 hp_flags |= MV_HP_GEN_II;
2577
2578 switch (pdev->revision) {
2579 case 0x7:
2580 hp_flags |= MV_HP_ERRATA_60X1B2;
2581 break;
2582 case 0x9:
2583 hp_flags |= MV_HP_ERRATA_60X1C0;
2584 break;
2585 default:
2586 dev_printk(KERN_WARNING, &pdev->dev,
2587 "Applying B2 workarounds to unknown rev\n");
2588 hp_flags |= MV_HP_ERRATA_60X1B2;
2589 break;
2590 }
2591 break;
2592
2593 case chip_7042:
2594 hp_flags |= MV_HP_PCIE;
2595 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2596 (pdev->device == 0x2300 || pdev->device == 0x2310))
2597 {
2598 /*
2599 * Highpoint RocketRAID PCIe 23xx series cards:
2600 *
2601 * Unconfigured drives are treated as "Legacy"
2602 * by the BIOS, and it overwrites sector 8 with
2603 * a "Lgcy" metadata block prior to Linux boot.
2604 *
2605 * Configured drives (RAID or JBOD) leave sector 8
2606 * alone, but instead overwrite a high numbered
2607 * sector for the RAID metadata. This sector can
2608 * be determined exactly, by truncating the physical
2609 * drive capacity to a nice even GB value.
2610 *
2611 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2612 *
2613 * Warn the user, lest they think we're just buggy.
2614 */
2615 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2616 " BIOS CORRUPTS DATA on all attached drives,"
2617 " regardless of if/how they are configured."
2618 " BEWARE!\n");
2619 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2620 " use sectors 8-9 on \"Legacy\" drives,"
2621 " and avoid the final two gigabytes on"
2622 " all RocketRAID BIOS initialized drives.\n");
2623 }
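		/* fall through: apart from the PCIe flag set above, the
		 * 7042 is programmed exactly like the 6042 */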
2624 case chip_6042:
2625 hpriv->ops = &mv6xxx_ops;
2626 hp_flags |= MV_HP_GEN_IIE;
2627
2628 switch (pdev->revision) {
2629 case 0x0:
2630 hp_flags |= MV_HP_ERRATA_XX42A0;
2631 break;
2632 case 0x1:
2633 hp_flags |= MV_HP_ERRATA_60X1C0;
2634 break;
2635 default:
2636 dev_printk(KERN_WARNING, &pdev->dev,
2637 "Applying 60X1C0 workarounds to unknown rev\n");
2638 hp_flags |= MV_HP_ERRATA_60X1C0;
2639 break;
2640 }
2641 break;
2642 case chip_soc:
2643 hpriv->ops = &mv_soc_ops;
2644 hp_flags |= MV_HP_ERRATA_60X1C0;
2645 break;
2646
2647 default:
2648 dev_printk(KERN_ERR, host->dev,
2649 "BUG: invalid board index %u\n", board_idx);
2650 return 1;
2651 }
2652
2653 hpriv->hp_flags = hp_flags;
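	/* PCIe-attached parts expose their interrupt cause/mask registers at
	 * different offsets than the conventional-PCI parts, so cache the
	 * correct set here for use throughout the rest of the driver.
	 */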
2654 if (hp_flags & MV_HP_PCIE) {
2655 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2656 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2657 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2658 } else {
2659 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2660 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2661 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2662 }
2663
2664 return 0;
2665 }
2666
2667 /**
2668 * mv_init_host - Perform some early initialization of the host.
2669 * @host: ATA host to initialize
2670 * @board_idx: controller index
2671 *
2672 * If possible, do an early global reset of the host. Then do
2673 * our port init and clear/unmask all/relevant host interrupts.
2674 *
2675 * LOCKING:
2676 * Inherited from caller.
2677 */
2678 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2679 {
2680 int rc = 0, n_hc, port, hc;
2681 struct mv_host_priv *hpriv = host->private_data;
2682 void __iomem *mmio = hpriv->base;
2683
2684 rc = mv_chip_id(host, board_idx);
2685 if (rc)
2686 goto done;
2687
2688 if (HAS_PCI(host)) {
2689 hpriv->main_cause_reg_addr = hpriv->base +
2690 HC_MAIN_IRQ_CAUSE_OFS;
2691 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2692 } else {
2693 hpriv->main_cause_reg_addr = hpriv->base +
2694 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2695 hpriv->main_mask_reg_addr = hpriv->base +
2696 HC_SOC_MAIN_IRQ_MASK_OFS;
2697 }
2698 /* mask all chip interrupts until the unmask at the end of this function */
2699 writel(0, hpriv->main_mask_reg_addr);
2700
2701 n_hc = mv_get_hc_count(host->ports[0]->flags);
2702
2703 for (port = 0; port < host->n_ports; port++)
2704 hpriv->ops->read_preamp(hpriv, port, mmio);
2705
2706 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2707 if (rc)
2708 goto done;
2709
2710 hpriv->ops->reset_flash(hpriv, mmio);
2711 hpriv->ops->reset_bus(host, mmio);
2712 hpriv->ops->enable_leds(hpriv, mmio);
2713
2714 for (port = 0; port < host->n_ports; port++) {
2715 if (IS_GEN_II(hpriv)) {
2716 void __iomem *port_mmio = mv_port_base(mmio, port);
2717
2718 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2719 ifctl |= (1 << 7); /* enable gen2i speed */
2720 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2721 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2722 }
2723
2724 hpriv->ops->phy_errata(hpriv, mmio, port);
2725 }
2726
2727 for (port = 0; port < host->n_ports; port++) {
2728 struct ata_port *ap = host->ports[port];
2729 void __iomem *port_mmio = mv_port_base(mmio, port);
2730
2731 mv_port_init(&ap->ioaddr, port_mmio);
2732
2733 #ifdef CONFIG_PCI
2734 if (HAS_PCI(host)) {
2735 unsigned int offset = port_mmio - mmio;
2736 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2737 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2738 }
2739 #endif
2740 }
2741
2742 for (hc = 0; hc < n_hc; hc++) {
2743 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2744
2745 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2746 "(before clear)=0x%08x\n", hc,
2747 readl(hc_mmio + HC_CFG_OFS),
2748 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2749
2750 /* Clear any currently outstanding hc interrupt conditions */
2751 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2752 }
2753
2754 if (HAS_PCI(host)) {
2755 /* Clear any currently outstanding host interrupt conditions */
2756 writelfl(0, mmio + hpriv->irq_cause_ofs);
2757
2758 /* and unmask interrupt generation for host regs */
2759 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2760 if (IS_GEN_I(hpriv))
2761 writelfl(~HC_MAIN_MASKED_IRQS_5,
2762 hpriv->main_mask_reg_addr);
2763 else
2764 writelfl(~HC_MAIN_MASKED_IRQS,
2765 hpriv->main_mask_reg_addr);
2766
2767 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2768 "PCI int cause/mask=0x%08x/0x%08x\n",
2769 readl(hpriv->main_cause_reg_addr),
2770 readl(hpriv->main_mask_reg_addr),
2771 readl(mmio + hpriv->irq_cause_ofs),
2772 readl(mmio + hpriv->irq_mask_ofs));
2773 } else {
2774 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2775 hpriv->main_mask_reg_addr);
2776 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2777 readl(hpriv->main_cause_reg_addr),
2778 readl(hpriv->main_mask_reg_addr));
2779 }
2780 done:
2781 return rc;
2782 }
2783
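/* All three pools are device-managed (dmam_*), so they are released
 * automatically when the device goes away; no explicit destroy path is
 * needed on the error returns below.
 */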
2784 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2785 {
2786 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2787 MV_CRQB_Q_SZ, 0);
2788 if (!hpriv->crqb_pool)
2789 return -ENOMEM;
2790
2791 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2792 MV_CRPB_Q_SZ, 0);
2793 if (!hpriv->crpb_pool)
2794 return -ENOMEM;
2795
2796 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2797 MV_SG_TBL_SZ, 0);
2798 if (!hpriv->sg_tbl_pool)
2799 return -ENOMEM;
2800
2801 return 0;
2802 }
2803
2804 /**
2805 * mv_platform_probe - handle a positive probe of an SoC Marvell
2806 * host
2807 * @pdev: platform device found
2808 *
2809 * LOCKING:
2810 * Inherited from caller.
2811 */
2812 static int mv_platform_probe(struct platform_device *pdev)
2813 {
2814 static int printed_version;
2815 const struct mv_sata_platform_data *mv_platform_data;
2816 const struct ata_port_info *ppi[] =
2817 { &mv_port_info[chip_soc], NULL };
2818 struct ata_host *host;
2819 struct mv_host_priv *hpriv;
2820 struct resource *res;
2821 int n_ports, rc;
2822
2823 if (!printed_version++)
2824 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2825
2826 /*
2827 * Simple resource validation ..
2828 */
2829 if (unlikely(pdev->num_resources != 2)) {
2830 dev_err(&pdev->dev, "invalid number of resources\n");
2831 return -EINVAL;
2832 }
2833
2834 /*
2835 * Get the register base first
2836 */
2837 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2838 if (res == NULL)
2839 return -EINVAL;
2840
2841 /* allocate host */
2842 mv_platform_data = pdev->dev.platform_data;
2843 n_ports = mv_platform_data->n_ports;
2844
2845 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2846 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2847
2848 if (!host || !hpriv)
2849 return -ENOMEM;
2850 host->private_data = hpriv;
2851 hpriv->n_ports = n_ports;
2852
2853 host->iomap = NULL;
2854 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2855 res->end - res->start + 1);
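	/* The SoC resource starts at the SATAHC0 register block, while the
	 * port/HC offset helpers in this driver are written relative to the
	 * chip-wide SATA base used on PCI parts; subtracting
	 * MV_SATAHC0_REG_BASE presumably re-aligns the mapped base so those
	 * helpers work unchanged here.
	 */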
2856 hpriv->base -= MV_SATAHC0_REG_BASE;
2857
2858 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2859 if (rc)
2860 return rc;
2861
2862 /* initialize adapter */
2863 rc = mv_init_host(host, chip_soc);
2864 if (rc)
2865 return rc;
2866
2867 dev_printk(KERN_INFO, &pdev->dev,
2868 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2869 host->n_ports);
2870
2871 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2872 IRQF_SHARED, &mv6_sht);
2873 }
2874
2875 /**
2877 * mv_platform_remove - unplug a platform interface
2878 * @pdev: platform device
2879 *
2880 * A platform bus SATA device has been unplugged. Perform the needed
2881 * cleanup. Also called on module unload for any active devices.
2882 */
2883 static int __devexit mv_platform_remove(struct platform_device *pdev)
2884 {
2885 struct device *dev = &pdev->dev;
2886 struct ata_host *host = dev_get_drvdata(dev);
2887
2888 ata_host_detach(host);
2889 return 0;
2890 }
2891
2892 static struct platform_driver mv_platform_driver = {
2893 .probe = mv_platform_probe,
2894 .remove = __devexit_p(mv_platform_remove),
2895 .driver = {
2896 .name = DRV_NAME,
2897 .owner = THIS_MODULE,
2898 },
2899 };
2900
2901
2902 #ifdef CONFIG_PCI
2903 static int mv_pci_init_one(struct pci_dev *pdev,
2904 const struct pci_device_id *ent);
2905
2906
2907 static struct pci_driver mv_pci_driver = {
2908 .name = DRV_NAME,
2909 .id_table = mv_pci_tbl,
2910 .probe = mv_pci_init_one,
2911 .remove = ata_pci_remove_one,
2912 };
2913
2914 /*
2915 * module options
2916 */
2917 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2918
2919
2920 /* move to PCI layer or libata core? */
2921 static int pci_go_64(struct pci_dev *pdev)
2922 {
2923 int rc;
2924
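	/* Prefer a 64-bit streaming DMA mask; if the coherent (consistent)
	 * mask cannot also be widened to 64 bits, fall back to a 32-bit
	 * coherent mask. If 64-bit streaming DMA is unavailable, run with
	 * 32-bit masks for both.
	 */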
2925 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2926 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2927 if (rc) {
2928 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2929 if (rc) {
2930 dev_printk(KERN_ERR, &pdev->dev,
2931 "64-bit DMA enable failed\n");
2932 return rc;
2933 }
2934 }
2935 } else {
2936 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2937 if (rc) {
2938 dev_printk(KERN_ERR, &pdev->dev,
2939 "32-bit DMA enable failed\n");
2940 return rc;
2941 }
2942 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2943 if (rc) {
2944 dev_printk(KERN_ERR, &pdev->dev,
2945 "32-bit consistent DMA enable failed\n");
2946 return rc;
2947 }
2948 }
2949
2950 return rc;
2951 }
2952
2953 /**
2954 * mv_print_info - Dump key info to kernel log for perusal.
2955 * @host: ATA host to print info about
2956 *
2957 * FIXME: complete this.
2958 *
2959 * LOCKING:
2960 * Inherited from caller.
2961 */
2962 static void mv_print_info(struct ata_host *host)
2963 {
2964 struct pci_dev *pdev = to_pci_dev(host->dev);
2965 struct mv_host_priv *hpriv = host->private_data;
2966 u8 scc;
2967 const char *scc_s, *gen;
2968
2969 /* Read the PCI class code so we can report whether the part presents
2970 * itself as a SCSI or a RAID class device
2971 */
2972 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2973 if (scc == 0)
2974 scc_s = "SCSI";
2975 else if (scc == 0x01)
2976 scc_s = "RAID";
2977 else
2978 scc_s = "?";
2979
2980 if (IS_GEN_I(hpriv))
2981 gen = "I";
2982 else if (IS_GEN_II(hpriv))
2983 gen = "II";
2984 else if (IS_GEN_IIE(hpriv))
2985 gen = "IIE";
2986 else
2987 gen = "?";
2988
2989 dev_printk(KERN_INFO, &pdev->dev,
2990 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2991 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2992 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2993 }
2994
2995 /**
2996 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
2997 * @pdev: PCI device found
2998 * @ent: PCI device ID entry for the matched host
2999 *
3000 * LOCKING:
3001 * Inherited from caller.
3002 */
3003 static int mv_pci_init_one(struct pci_dev *pdev,
3004 const struct pci_device_id *ent)
3005 {
3006 static int printed_version;
3007 unsigned int board_idx = (unsigned int)ent->driver_data;
3008 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3009 struct ata_host *host;
3010 struct mv_host_priv *hpriv;
3011 int n_ports, rc;
3012
3013 if (!printed_version++)
3014 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3015
3016 /* allocate host */
3017 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3018
3019 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3020 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3021 if (!host || !hpriv)
3022 return -ENOMEM;
3023 host->private_data = hpriv;
3024 hpriv->n_ports = n_ports;
3025
3026 /* acquire resources */
3027 rc = pcim_enable_device(pdev);
3028 if (rc)
3029 return rc;
3030
3031 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3032 if (rc == -EBUSY)
3033 pcim_pin_device(pdev);
3034 if (rc)
3035 return rc;
3036 host->iomap = pcim_iomap_table(pdev);
3037 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3038
3039 rc = pci_go_64(pdev);
3040 if (rc)
3041 return rc;
3042
3043 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3044 if (rc)
3045 return rc;
3046
3047 /* initialize adapter */
3048 rc = mv_init_host(host, board_idx);
3049 if (rc)
3050 return rc;
3051
3052 /* Enable interrupts: if MSI was requested but cannot be enabled, ensure legacy INTx is on */
3053 if (msi && pci_enable_msi(pdev))
3054 pci_intx(pdev, 1);
3055
3056 mv_dump_pci_cfg(pdev, 0x68);
3057 mv_print_info(host);
3058
3059 pci_set_master(pdev);
3060 pci_try_set_mwi(pdev);
3061 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3062 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3063 }
3064 #endif
3065
3066 static int mv_platform_probe(struct platform_device *pdev);
3067 static int __devexit mv_platform_remove(struct platform_device *pdev);
3068
3069 static int __init mv_init(void)
3070 {
3071 int rc = -ENODEV;
3072 #ifdef CONFIG_PCI
3073 rc = pci_register_driver(&mv_pci_driver);
3074 if (rc < 0)
3075 return rc;
3076 #endif
3077 rc = platform_driver_register(&mv_platform_driver);
3078
3079 #ifdef CONFIG_PCI
3080 if (rc < 0)
3081 pci_unregister_driver(&mv_pci_driver);
3082 #endif
3083 return rc;
3084 }
3085
3086 static void __exit mv_exit(void)
3087 {
3088 #ifdef CONFIG_PCI
3089 pci_unregister_driver(&mv_pci_driver);
3090 #endif
3091 platform_driver_unregister(&mv_platform_driver);
3092 }
3093
3094 MODULE_AUTHOR("Brett Russ");
3095 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3096 MODULE_LICENSE("GPL");
3097 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3098 MODULE_VERSION(DRV_VERSION);
3099 MODULE_ALIAS("platform:sata_mv");
3100
3101 #ifdef CONFIG_PCI
3102 module_param(msi, int, 0444);
3103 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3104 #endif
3105
3106 module_init(mv_init);
3107 module_exit(mv_exit);