1 /*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember that a couple of workarounds (one related to
30 PCI-X) are still needed.
31
32 2) Improve/fix IRQ and error handling sequences.
33
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
39
40 5) Investigate problems with PCI Message Signaled Interrupts (MSI).
41
42 6) Add port multiplier support (intermediate)
43
44 7) Develop a low-power-consumption strategy, and implement it.
45
46 8) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
48 like that.
49
50 9) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signaled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is not worth the
53 latency cost.
54
55 10) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
58
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
61
62 */
63
64
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
81
82 #define DRV_NAME "sata_mv"
83 #define DRV_VERSION "1.20"
84
85 enum {
86 /* BARs are enumerated in terms of pci_resource_start() */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
90
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
93
94 MV_PCI_REG_BASE = 0,
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
101
102 MV_SATAHC0_REG_BASE = 0x20000,
103 MV_FLASH_CTL = 0x1046c,
104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
106
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
111
112 MV_MAX_Q_DEPTH = 32,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
114
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
118 */
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
121 MV_MAX_SG_CT = 256,
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
123
124 MV_PORTS_PER_HC = 4,
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
128 MV_PORT_MASK = 3,
129
130 /* Host Flags */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
135
136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
140
141 CRQB_FLAG_READ = (1 << 0),
142 CRQB_TAG_SHIFT = 1,
143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
148
149 CRPB_FLAG_STATUS_SHIFT = 8,
150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
152
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
154
155 /* PCI interface registers */
156
157 PCI_COMMAND_OFS = 0xc00,
158
159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
163
164 MV_PCI_MODE = 0xd00,
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
174
175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
178
179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
182
183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
191 PCI_ERR = (1 << 18),
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
205 HC_MAIN_RSVD),
206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
207 HC_MAIN_RSVD_5),
208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
209
210 /* SATAHC registers */
211 HC_CFG_OFS = 0,
212
213 HC_IRQ_CAUSE_OFS = 0x14,
214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
217
218 /* Shadow block registers */
219 SHD_BLK_OFS = 0x100,
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
221
222 /* SATA registers */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
226 PHY_MODE3 = 0x310,
227 PHY_MODE4 = 0x314,
228 PHY_MODE2 = 0x330,
229 MV5_PHY_MODE = 0x74,
230 MV5_LT_MODE = 0x30,
231 MV5_PHY_CTL = 0x0C,
232 SATA_INTERFACE_CTL = 0x050,
233
234 MV_M2_PREAMP_MASK = 0x7e0,
235
236 /* Port registers */
237 EDMA_CFG_OFS = 0,
238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
243
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
260
261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
266
267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
268
269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
275
276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
277
278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
281
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
286
287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
288 EDMA_ERR_PRD_PAR |
289 EDMA_ERR_DEV_DCON |
290 EDMA_ERR_DEV_CON |
291 EDMA_ERR_SERR |
292 EDMA_ERR_SELF_DIS |
293 EDMA_ERR_CRQB_PAR |
294 EDMA_ERR_CRPB_PAR |
295 EDMA_ERR_INTRL_PAR |
296 EDMA_ERR_IORDY |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
302 EDMA_ERR_PRD_PAR |
303 EDMA_ERR_DEV_DCON |
304 EDMA_ERR_DEV_CON |
305 EDMA_ERR_OVERRUN_5 |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
308 EDMA_ERR_CRQB_PAR |
309 EDMA_ERR_CRPB_PAR |
310 EDMA_ERR_INTRL_PAR |
311 EDMA_ERR_IORDY,
312
313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
315
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
318
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
322 EDMA_RSP_Q_PTR_SHIFT = 3,
323
324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
328
329 EDMA_IORDY_TMOUT = 0x34,
330 EDMA_ARB_CFG = 0x38,
331
332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
338 MV_HP_ERRATA_XX42A0 = (1 << 5),
339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
343
344 /* Port private flags (pp_flags) */
345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
348 };
349
350 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
353 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
354
355 enum {
356 /* DMA boundary 0xffff is required by the s/g splitting
357 * we need on /length/ in mv_fill_sg().
358 */
359 MV_DMA_BOUNDARY = 0xffffU,
360
361 /* mask of register bits containing lower 32 bits
362 * of EDMA request queue DMA address
363 */
364 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
365
366 /* ditto, for response queue */
367 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
368 };
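/*
 * Illustrative note on the two masks above (not new driver code):
 * EDMA_REQ_Q_BASE_LO_MASK (0xfffffc00) keeps the low 10 bits clear
 * because the CRQB queue must be 1 KB aligned, and
 * EDMA_RSP_Q_BASE_LO_MASK (0xffffff00) keeps the low 8 bits clear for
 * the 256 B aligned CRPB queue; the low bits of those same registers
 * carry the queue in/out pointers instead (see mv_set_edma_ptrs(),
 * which WARNs if either base address violates the alignment).
 */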
369
370 enum chip_type {
371 chip_504x,
372 chip_508x,
373 chip_5080,
374 chip_604x,
375 chip_608x,
376 chip_6042,
377 chip_7042,
378 chip_soc,
379 };
380
381 /* Command ReQuest Block: 32B */
382 struct mv_crqb {
383 __le32 sg_addr;
384 __le32 sg_addr_hi;
385 __le16 ctrl_flags;
386 __le16 ata_cmd[11];
387 };
388
389 struct mv_crqb_iie {
390 __le32 addr;
391 __le32 addr_hi;
392 __le32 flags;
393 __le32 len;
394 __le32 ata_cmd[4];
395 };
396
397 /* Command ResPonse Block: 8B */
398 struct mv_crpb {
399 __le16 id;
400 __le16 flags;
401 __le32 tmstmp;
402 };
403
404 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
405 struct mv_sg {
406 __le32 addr;
407 __le32 flags_size;
408 __le32 addr_hi;
409 __le32 reserved;
410 };
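/*
 * Illustrative arithmetic, derived from the structures above and the
 * constants near the top of the file (not new driver code):
 *
 *	MV_CRQB_Q_SZ = MV_MAX_Q_DEPTH * sizeof(struct mv_crqb) = 32 * 32 = 1 KB
 *	MV_CRPB_Q_SZ = MV_MAX_Q_DEPTH * sizeof(struct mv_crpb) = 32 *  8 = 256 B
 *	MV_SG_TBL_SZ = MV_MAX_SG_CT  * sizeof(struct mv_sg)    = 256 * 16 = 4 KB
 *
 * so the CRQB and CRPB queues are exactly as large as their required
 * 1 KB / 256 B alignment, and one sg table holds the full 256 ePRDs
 * handed out per command tag in mv_port_start().
 */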
411
412 struct mv_port_priv {
413 struct mv_crqb *crqb;
414 dma_addr_t crqb_dma;
415 struct mv_crpb *crpb;
416 dma_addr_t crpb_dma;
417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
419
420 unsigned int req_idx;
421 unsigned int resp_idx;
422
423 u32 pp_flags;
424 };
425
426 struct mv_port_signal {
427 u32 amps;
428 u32 pre;
429 };
430
431 struct mv_host_priv {
432 u32 hp_flags;
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
435 int n_ports;
436 void __iomem *base;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
439 u32 irq_cause_ofs;
440 u32 irq_mask_ofs;
441 u32 unmask_all_irqs;
442 /*
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
446 */
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
450 };
451
452 struct mv_hw_ops {
453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
454 unsigned int port);
455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
457 void __iomem *mmio);
458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
459 unsigned int n_hc);
460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
462 };
463
464 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
465 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
466 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
467 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
468 static int mv_port_start(struct ata_port *ap);
469 static void mv_port_stop(struct ata_port *ap);
470 static void mv_qc_prep(struct ata_queued_cmd *qc);
471 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
472 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
473 static void mv_error_handler(struct ata_port *ap);
474 static void mv_eh_freeze(struct ata_port *ap);
475 static void mv_eh_thaw(struct ata_port *ap);
476 static void mv6_dev_config(struct ata_device *dev);
477
478 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
479 unsigned int port);
480 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
481 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
482 void __iomem *mmio);
483 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
484 unsigned int n_hc);
485 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
486 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
487
488 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
489 unsigned int port);
490 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
491 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
492 void __iomem *mmio);
493 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
494 unsigned int n_hc);
495 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
496 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
497 void __iomem *mmio);
498 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
499 void __iomem *mmio);
500 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
501 void __iomem *mmio, unsigned int n_hc);
502 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
503 void __iomem *mmio);
504 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
505 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
506 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
507 unsigned int port_no);
508 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
509 void __iomem *port_mmio, int want_ncq);
510 static int __mv_stop_dma(struct ata_port *ap);
511
512 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
513 * because we have to allow room for worst case splitting of
514 * PRDs for 64K boundaries in mv_fill_sg().
515 */
516 static struct scsi_host_template mv5_sht = {
517 .module = THIS_MODULE,
518 .name = DRV_NAME,
519 .ioctl = ata_scsi_ioctl,
520 .queuecommand = ata_scsi_queuecmd,
521 .can_queue = ATA_DEF_QUEUE,
522 .this_id = ATA_SHT_THIS_ID,
523 .sg_tablesize = MV_MAX_SG_CT / 2,
524 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
525 .emulated = ATA_SHT_EMULATED,
526 .use_clustering = 1,
527 .proc_name = DRV_NAME,
528 .dma_boundary = MV_DMA_BOUNDARY,
529 .slave_configure = ata_scsi_slave_config,
530 .slave_destroy = ata_scsi_slave_destroy,
531 .bios_param = ata_std_bios_param,
532 };
533
534 static struct scsi_host_template mv6_sht = {
535 .module = THIS_MODULE,
536 .name = DRV_NAME,
537 .ioctl = ata_scsi_ioctl,
538 .queuecommand = ata_scsi_queuecmd,
539 .change_queue_depth = ata_scsi_change_queue_depth,
540 .can_queue = MV_MAX_Q_DEPTH - 1,
541 .this_id = ATA_SHT_THIS_ID,
542 .sg_tablesize = MV_MAX_SG_CT / 2,
543 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
544 .emulated = ATA_SHT_EMULATED,
545 .use_clustering = 1,
546 .proc_name = DRV_NAME,
547 .dma_boundary = MV_DMA_BOUNDARY,
548 .slave_configure = ata_scsi_slave_config,
549 .slave_destroy = ata_scsi_slave_destroy,
550 .bios_param = ata_std_bios_param,
551 };
552
553 static const struct ata_port_operations mv5_ops = {
554 .tf_load = ata_tf_load,
555 .tf_read = ata_tf_read,
556 .check_status = ata_check_status,
557 .exec_command = ata_exec_command,
558 .dev_select = ata_std_dev_select,
559
560 .qc_prep = mv_qc_prep,
561 .qc_issue = mv_qc_issue,
562 .data_xfer = ata_data_xfer,
563
564 .irq_clear = ata_noop_irq_clear,
565 .irq_on = ata_irq_on,
566
567 .error_handler = mv_error_handler,
568 .freeze = mv_eh_freeze,
569 .thaw = mv_eh_thaw,
570
571 .scr_read = mv5_scr_read,
572 .scr_write = mv5_scr_write,
573
574 .port_start = mv_port_start,
575 .port_stop = mv_port_stop,
576 };
577
578 static const struct ata_port_operations mv6_ops = {
579 .dev_config = mv6_dev_config,
580 .tf_load = ata_tf_load,
581 .tf_read = ata_tf_read,
582 .check_status = ata_check_status,
583 .exec_command = ata_exec_command,
584 .dev_select = ata_std_dev_select,
585
586 .qc_prep = mv_qc_prep,
587 .qc_issue = mv_qc_issue,
588 .data_xfer = ata_data_xfer,
589
590 .irq_clear = ata_noop_irq_clear,
591 .irq_on = ata_irq_on,
592
593 .error_handler = mv_error_handler,
594 .freeze = mv_eh_freeze,
595 .thaw = mv_eh_thaw,
596 .qc_defer = ata_std_qc_defer,
597
598 .scr_read = mv_scr_read,
599 .scr_write = mv_scr_write,
600
601 .port_start = mv_port_start,
602 .port_stop = mv_port_stop,
603 };
604
605 static const struct ata_port_operations mv_iie_ops = {
606 .tf_load = ata_tf_load,
607 .tf_read = ata_tf_read,
608 .check_status = ata_check_status,
609 .exec_command = ata_exec_command,
610 .dev_select = ata_std_dev_select,
611
612 .qc_prep = mv_qc_prep_iie,
613 .qc_issue = mv_qc_issue,
614 .data_xfer = ata_data_xfer,
615
616 .irq_clear = ata_noop_irq_clear,
617 .irq_on = ata_irq_on,
618
619 .error_handler = mv_error_handler,
620 .freeze = mv_eh_freeze,
621 .thaw = mv_eh_thaw,
622 .qc_defer = ata_std_qc_defer,
623
624 .scr_read = mv_scr_read,
625 .scr_write = mv_scr_write,
626
627 .port_start = mv_port_start,
628 .port_stop = mv_port_stop,
629 };
630
631 static const struct ata_port_info mv_port_info[] = {
632 { /* chip_504x */
633 .flags = MV_COMMON_FLAGS,
634 .pio_mask = 0x1f, /* pio0-4 */
635 .udma_mask = ATA_UDMA6,
636 .port_ops = &mv5_ops,
637 },
638 { /* chip_508x */
639 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
640 .pio_mask = 0x1f, /* pio0-4 */
641 .udma_mask = ATA_UDMA6,
642 .port_ops = &mv5_ops,
643 },
644 { /* chip_5080 */
645 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
646 .pio_mask = 0x1f, /* pio0-4 */
647 .udma_mask = ATA_UDMA6,
648 .port_ops = &mv5_ops,
649 },
650 { /* chip_604x */
651 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
652 ATA_FLAG_NCQ,
653 .pio_mask = 0x1f, /* pio0-4 */
654 .udma_mask = ATA_UDMA6,
655 .port_ops = &mv6_ops,
656 },
657 { /* chip_608x */
658 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
659 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
660 .pio_mask = 0x1f, /* pio0-4 */
661 .udma_mask = ATA_UDMA6,
662 .port_ops = &mv6_ops,
663 },
664 { /* chip_6042 */
665 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
666 ATA_FLAG_NCQ,
667 .pio_mask = 0x1f, /* pio0-4 */
668 .udma_mask = ATA_UDMA6,
669 .port_ops = &mv_iie_ops,
670 },
671 { /* chip_7042 */
672 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
673 ATA_FLAG_NCQ,
674 .pio_mask = 0x1f, /* pio0-4 */
675 .udma_mask = ATA_UDMA6,
676 .port_ops = &mv_iie_ops,
677 },
678 { /* chip_soc */
679 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
680 .pio_mask = 0x1f, /* pio0-4 */
681 .udma_mask = ATA_UDMA6,
682 .port_ops = &mv_iie_ops,
683 },
684 };
685
686 static const struct pci_device_id mv_pci_tbl[] = {
687 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
688 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
689 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
690 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
691 /* RocketRAID 1740/174x have different identifiers */
692 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
693 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
694
695 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
696 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
697 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
698 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
699 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
700
701 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
702
703 /* Adaptec 1430SA */
704 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
705
706 /* Marvell 7042 support */
707 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
708
709 /* Highpoint RocketRAID PCIe series */
710 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
711 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
712
713 { } /* terminate list */
714 };
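/*
 * Illustrative sketch of how the table above is consumed (the actual
 * PCI probe routine, mv_init_one(), lives later in this file and is
 * not part of this excerpt): each entry's .driver_data is a chip_type
 * value, which the probe path uses as an index into mv_port_info[],
 * roughly:
 *
 *	unsigned int board_idx = (unsigned int)ent->driver_data;
 *	const struct ata_port_info *ppi[] =
 *		{ &mv_port_info[board_idx], NULL };
 *
 * so supporting a new PCI ID is normally just a matter of adding a
 * line above with the appropriate chip_type.
 */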
715
716 static const struct mv_hw_ops mv5xxx_ops = {
717 .phy_errata = mv5_phy_errata,
718 .enable_leds = mv5_enable_leds,
719 .read_preamp = mv5_read_preamp,
720 .reset_hc = mv5_reset_hc,
721 .reset_flash = mv5_reset_flash,
722 .reset_bus = mv5_reset_bus,
723 };
724
725 static const struct mv_hw_ops mv6xxx_ops = {
726 .phy_errata = mv6_phy_errata,
727 .enable_leds = mv6_enable_leds,
728 .read_preamp = mv6_read_preamp,
729 .reset_hc = mv6_reset_hc,
730 .reset_flash = mv6_reset_flash,
731 .reset_bus = mv_reset_pci_bus,
732 };
733
734 static const struct mv_hw_ops mv_soc_ops = {
735 .phy_errata = mv6_phy_errata,
736 .enable_leds = mv_soc_enable_leds,
737 .read_preamp = mv_soc_read_preamp,
738 .reset_hc = mv_soc_reset_hc,
739 .reset_flash = mv_soc_reset_flash,
740 .reset_bus = mv_soc_reset_bus,
741 };
742
743 /*
744 * Functions
745 */
746
747 static inline void writelfl(unsigned long data, void __iomem *addr)
748 {
749 writel(data, addr);
750 (void) readl(addr); /* flush to avoid PCI posted write */
751 }
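/*
 * Usage note (illustrative): callers use writelfl() where a register
 * write must have reached the chip before execution continues, e.g.
 *
 *	writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
 *
 * in mv_start_dma() guarantees the enable bit is no longer sitting in
 * a posted-write buffer, at the cost of one extra MMIO read.
 */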
752
753 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
754 {
755 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
756 }
757
758 static inline unsigned int mv_hc_from_port(unsigned int port)
759 {
760 return port >> MV_PORT_HC_SHIFT;
761 }
762
763 static inline unsigned int mv_hardport_from_port(unsigned int port)
764 {
765 return port & MV_PORT_MASK;
766 }
767
768 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
769 unsigned int port)
770 {
771 return mv_hc_base(base, mv_hc_from_port(port));
772 }
773
774 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
775 {
776 return mv_hc_base_from_port(base, port) +
777 MV_SATAHC_ARBTR_REG_SZ +
778 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
779 }
780
781 static inline void __iomem *mv_host_base(struct ata_host *host)
782 {
783 struct mv_host_priv *hpriv = host->private_data;
784 return hpriv->base;
785 }
786
787 static inline void __iomem *mv_ap_base(struct ata_port *ap)
788 {
789 return mv_port_base(mv_host_base(ap->host), ap->port_no);
790 }
791
792 static inline int mv_get_hc_count(unsigned long port_flags)
793 {
794 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
795 }
796
797 static void mv_set_edma_ptrs(void __iomem *port_mmio,
798 struct mv_host_priv *hpriv,
799 struct mv_port_priv *pp)
800 {
801 u32 index;
802
803 /*
804 * initialize request queue
805 */
806 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
807
808 WARN_ON(pp->crqb_dma & 0x3ff);
809 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
810 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
811 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
812
813 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
814 writelfl((pp->crqb_dma & 0xffffffff) | index,
815 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
816 else
817 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
818
819 /*
820 * initialize response queue
821 */
822 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
823
824 WARN_ON(pp->crpb_dma & 0xff);
825 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
826
827 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
828 writelfl((pp->crpb_dma & 0xffffffff) | index,
829 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
830 else
831 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
832
833 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
834 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
835 }
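/*
 * Note on the "(dma >> 16) >> 16" idiom used above (illustrative): it
 * extracts the upper 32 bits of a dma_addr_t without relying on a
 * ">> 32", which would be undefined behaviour when dma_addr_t is only
 * 32 bits wide.  In that configuration the expression simply yields 0,
 * which is exactly what the *_BASE_HI registers should be programmed
 * with.
 */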
836
837 /**
838 * mv_start_dma - Enable eDMA engine
839 * @ap: ATA channel to manipulate
840 * @port_mmio: port base address
* @pp: port private data
* @protocol: taskfile protocol, used to decide whether NCQ EDMA is wanted
841 *
842 * Verify the local cache of the eDMA state is accurate with a
843 * WARN_ON.
844 *
845 * LOCKING:
846 * Inherited from caller.
847 */
848 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
849 struct mv_port_priv *pp, u8 protocol)
850 {
851 int want_ncq = (protocol == ATA_PROT_NCQ);
852
853 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
854 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
855 if (want_ncq != using_ncq)
856 __mv_stop_dma(ap);
857 }
858 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
859 struct mv_host_priv *hpriv = ap->host->private_data;
860 int hard_port = mv_hardport_from_port(ap->port_no);
861 void __iomem *hc_mmio = mv_hc_base_from_port(
862 mv_host_base(ap->host), hard_port);
863 u32 hc_irq_cause, ipending;
864
865 /* clear EDMA event indicators, if any */
866 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
867
868 /* clear EDMA interrupt indicator, if any */
869 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
870 ipending = (DEV_IRQ << hard_port) |
871 (CRPB_DMA_DONE << hard_port);
872 if (hc_irq_cause & ipending) {
873 writelfl(hc_irq_cause & ~ipending,
874 hc_mmio + HC_IRQ_CAUSE_OFS);
875 }
876
877 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
878
879 /* clear FIS IRQ Cause */
880 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
881
882 mv_set_edma_ptrs(port_mmio, hpriv, pp);
883
884 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
885 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
886 }
887 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
888 }
889
890 /**
891 * __mv_stop_dma - Disable eDMA engine
892 * @ap: ATA channel to manipulate
893 *
894 * Verify the local cache of the eDMA state is accurate with a
895 * WARN_ON.
896 *
897 * LOCKING:
898 * Inherited from caller.
899 */
900 static int __mv_stop_dma(struct ata_port *ap)
901 {
902 void __iomem *port_mmio = mv_ap_base(ap);
903 struct mv_port_priv *pp = ap->private_data;
904 u32 reg;
905 int i, err = 0;
906
907 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
908 /* Disable EDMA if active. The disable bit auto clears.
909 */
910 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
911 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
912 } else {
913 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
914 }
915
916 /* now properly wait for the eDMA to stop (up to 1000 polls of 100us, ~100ms) */
917 for (i = 1000; i > 0; i--) {
918 reg = readl(port_mmio + EDMA_CMD_OFS);
919 if (!(reg & EDMA_EN))
920 break;
921
922 udelay(100);
923 }
924
925 if (reg & EDMA_EN) {
926 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
927 err = -EIO;
928 }
929
930 return err;
931 }
932
933 static int mv_stop_dma(struct ata_port *ap)
934 {
935 unsigned long flags;
936 int rc;
937
938 spin_lock_irqsave(&ap->host->lock, flags);
939 rc = __mv_stop_dma(ap);
940 spin_unlock_irqrestore(&ap->host->lock, flags);
941
942 return rc;
943 }
944
945 #ifdef ATA_DEBUG
946 static void mv_dump_mem(void __iomem *start, unsigned bytes)
947 {
948 int b, w;
949 for (b = 0; b < bytes; ) {
950 DPRINTK("%p: ", start + b);
951 for (w = 0; b < bytes && w < 4; w++) {
952 printk("%08x ", readl(start + b));
953 b += sizeof(u32);
954 }
955 printk("\n");
956 }
957 }
958 #endif
959
960 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
961 {
962 #ifdef ATA_DEBUG
963 int b, w;
964 u32 dw;
965 for (b = 0; b < bytes; ) {
966 DPRINTK("%02x: ", b);
967 for (w = 0; b < bytes && w < 4; w++) {
968 (void) pci_read_config_dword(pdev, b, &dw);
969 printk("%08x ", dw);
970 b += sizeof(u32);
971 }
972 printk("\n");
973 }
974 #endif
975 }
976 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
977 struct pci_dev *pdev)
978 {
979 #ifdef ATA_DEBUG
980 void __iomem *hc_base = mv_hc_base(mmio_base,
981 port >> MV_PORT_HC_SHIFT);
982 void __iomem *port_base;
983 int start_port, num_ports, p, start_hc, num_hcs, hc;
984
985 if (0 > port) {
986 start_hc = start_port = 0;
987 num_ports = 8; /* should be benign for 4 port devs */
988 num_hcs = 2;
989 } else {
990 start_hc = port >> MV_PORT_HC_SHIFT;
991 start_port = port;
992 num_ports = num_hcs = 1;
993 }
994 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
995 num_ports > 1 ? num_ports - 1 : start_port);
996
997 if (NULL != pdev) {
998 DPRINTK("PCI config space regs:\n");
999 mv_dump_pci_cfg(pdev, 0x68);
1000 }
1001 DPRINTK("PCI regs:\n");
1002 mv_dump_mem(mmio_base+0xc00, 0x3c);
1003 mv_dump_mem(mmio_base+0xd00, 0x34);
1004 mv_dump_mem(mmio_base+0xf00, 0x4);
1005 mv_dump_mem(mmio_base+0x1d00, 0x6c);
1006 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
1007 hc_base = mv_hc_base(mmio_base, hc);
1008 DPRINTK("HC regs (HC %i):\n", hc);
1009 mv_dump_mem(hc_base, 0x1c);
1010 }
1011 for (p = start_port; p < start_port + num_ports; p++) {
1012 port_base = mv_port_base(mmio_base, p);
1013 DPRINTK("EDMA regs (port %i):\n", p);
1014 mv_dump_mem(port_base, 0x54);
1015 DPRINTK("SATA regs (port %i):\n", p);
1016 mv_dump_mem(port_base+0x300, 0x60);
1017 }
1018 #endif
1019 }
1020
1021 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1022 {
1023 unsigned int ofs;
1024
1025 switch (sc_reg_in) {
1026 case SCR_STATUS:
1027 case SCR_CONTROL:
1028 case SCR_ERROR:
1029 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1030 break;
1031 case SCR_ACTIVE:
1032 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1033 break;
1034 default:
1035 ofs = 0xffffffffU;
1036 break;
1037 }
1038 return ofs;
1039 }
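/*
 * Worked example (assuming the usual libata SCR numbering, SCR_STATUS
 * == 0, SCR_ERROR == 1, SCR_CONTROL == 2, SCR_ACTIVE == 3):
 *
 *	SCR_STATUS  -> 0x300 + 0 * 4 = 0x300
 *	SCR_ERROR   -> 0x300 + 1 * 4 = 0x304
 *	SCR_CONTROL -> 0x300 + 2 * 4 = 0x308
 *	SCR_ACTIVE  -> SATA_ACTIVE_OFS = 0x350
 *
 * which matches the "ctrl, err regs follow status" note on
 * SATA_STATUS_OFS, with SCR_ACTIVE living apart from the other three.
 */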
1040
1041 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1042 {
1043 unsigned int ofs = mv_scr_offset(sc_reg_in);
1044
1045 if (ofs != 0xffffffffU) {
1046 *val = readl(mv_ap_base(ap) + ofs);
1047 return 0;
1048 } else
1049 return -EINVAL;
1050 }
1051
1052 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1053 {
1054 unsigned int ofs = mv_scr_offset(sc_reg_in);
1055
1056 if (ofs != 0xffffffffU) {
1057 writelfl(val, mv_ap_base(ap) + ofs);
1058 return 0;
1059 } else
1060 return -EINVAL;
1061 }
1062
1063 static void mv6_dev_config(struct ata_device *adev)
1064 {
1065 /*
1066 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1067 * See mv_qc_prep() for more info.
1068 */
1069 if (adev->flags & ATA_DFLAG_NCQ)
1070 if (adev->max_sectors > ATA_MAX_SECTORS)
1071 adev->max_sectors = ATA_MAX_SECTORS;
1072 }
1073
1074 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1075 void __iomem *port_mmio, int want_ncq)
1076 {
1077 u32 cfg;
1078
1079 /* set up non-NCQ EDMA configuration */
1080 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1081
1082 if (IS_GEN_I(hpriv))
1083 cfg |= (1 << 8); /* enab config burst size mask */
1084
1085 else if (IS_GEN_II(hpriv))
1086 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1087
1088 else if (IS_GEN_IIE(hpriv)) {
1089 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1090 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1091 cfg |= (1 << 18); /* enab early completion */
1092 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1093 }
1094
1095 if (want_ncq) {
1096 cfg |= EDMA_CFG_NCQ;
1097 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1098 } else
1099 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1100
1101 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1102 }
1103
1104 static void mv_port_free_dma_mem(struct ata_port *ap)
1105 {
1106 struct mv_host_priv *hpriv = ap->host->private_data;
1107 struct mv_port_priv *pp = ap->private_data;
1108 int tag;
1109
1110 if (pp->crqb) {
1111 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1112 pp->crqb = NULL;
1113 }
1114 if (pp->crpb) {
1115 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1116 pp->crpb = NULL;
1117 }
1118 /*
1119 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1120 * For later hardware, we have one unique sg_tbl per NCQ tag.
1121 */
1122 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1123 if (pp->sg_tbl[tag]) {
1124 if (tag == 0 || !IS_GEN_I(hpriv))
1125 dma_pool_free(hpriv->sg_tbl_pool,
1126 pp->sg_tbl[tag],
1127 pp->sg_tbl_dma[tag]);
1128 pp->sg_tbl[tag] = NULL;
1129 }
1130 }
1131 }
1132
1133 /**
1134 * mv_port_start - Port specific init/start routine.
1135 * @ap: ATA channel to manipulate
1136 *
1137 * Allocate and point to DMA memory, init port private memory,
1138 * zero indices.
1139 *
1140 * LOCKING:
1141 * Inherited from caller.
1142 */
1143 static int mv_port_start(struct ata_port *ap)
1144 {
1145 struct device *dev = ap->host->dev;
1146 struct mv_host_priv *hpriv = ap->host->private_data;
1147 struct mv_port_priv *pp;
1148 void __iomem *port_mmio = mv_ap_base(ap);
1149 unsigned long flags;
1150 int tag;
1151
1152 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1153 if (!pp)
1154 return -ENOMEM;
1155 ap->private_data = pp;
1156
1157 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1158 if (!pp->crqb)
1159 return -ENOMEM;
1160 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1161
1162 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1163 if (!pp->crpb)
1164 goto out_port_free_dma_mem;
1165 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1166
1167 /*
1168 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1169 * For later hardware, we need one unique sg_tbl per NCQ tag.
1170 */
1171 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1172 if (tag == 0 || !IS_GEN_I(hpriv)) {
1173 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1174 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1175 if (!pp->sg_tbl[tag])
1176 goto out_port_free_dma_mem;
1177 } else {
1178 pp->sg_tbl[tag] = pp->sg_tbl[0];
1179 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1180 }
1181 }
1182
1183 spin_lock_irqsave(&ap->host->lock, flags);
1184
1185 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1186 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1187
1188 spin_unlock_irqrestore(&ap->host->lock, flags);
1189
1190 /* Don't turn on EDMA here...do it before DMA commands only. Else
1191 * we'll be unable to send non-data, PIO, etc due to restricted access
1192 * to shadow regs.
1193 */
1194 return 0;
1195
1196 out_port_free_dma_mem:
1197 mv_port_free_dma_mem(ap);
1198 return -ENOMEM;
1199 }
1200
1201 /**
1202 * mv_port_stop - Port specific cleanup/stop routine.
1203 * @ap: ATA channel to manipulate
1204 *
1205 * Stop DMA, cleanup port memory.
1206 *
1207 * LOCKING:
1208 * This routine uses the host lock to protect the DMA stop.
1209 */
1210 static void mv_port_stop(struct ata_port *ap)
1211 {
1212 mv_stop_dma(ap);
1213 mv_port_free_dma_mem(ap);
1214 }
1215
1216 /**
1217 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1218 * @qc: queued command whose SG list to source from
1219 *
1220 * Populate the SG list and mark the last entry.
1221 *
1222 * LOCKING:
1223 * Inherited from caller.
1224 */
1225 static void mv_fill_sg(struct ata_queued_cmd *qc)
1226 {
1227 struct mv_port_priv *pp = qc->ap->private_data;
1228 struct scatterlist *sg;
1229 struct mv_sg *mv_sg, *last_sg = NULL;
1230 unsigned int si;
1231
1232 mv_sg = pp->sg_tbl[qc->tag];
1233 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1234 dma_addr_t addr = sg_dma_address(sg);
1235 u32 sg_len = sg_dma_len(sg);
1236
1237 while (sg_len) {
1238 u32 offset = addr & 0xffff;
1239 u32 len = sg_len;
1240
1241 if ((offset + sg_len > 0x10000))
1242 len = 0x10000 - offset;
1243
1244 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1245 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1246 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1247
1248 sg_len -= len;
1249 addr += len;
1250
1251 last_sg = mv_sg;
1252 mv_sg++;
1253 }
1254 }
1255
1256 if (likely(last_sg))
1257 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1258 }
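/*
 * Worked example of the 64K splitting above (illustrative): an S/G
 * segment at dma address 0x0000fff0 with length 0x30 has offset 0xfff0,
 * so offset + sg_len crosses 0x10000 and the first ePRD is trimmed to
 * 0x10000 - 0xfff0 = 0x10 bytes; the loop then emits a second ePRD at
 * 0x00010000 covering the remaining 0x20 bytes.  This worst-case
 * doubling of entries is why the scsi_host_templates above set
 * .sg_tablesize to MV_MAX_SG_CT / 2.
 */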
1259
1260 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1261 {
1262 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1263 (last ? CRQB_CMD_LAST : 0);
1264 *cmdw = cpu_to_le16(tmp);
1265 }
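/*
 * Worked example (assuming ATA_REG_NSECT == 0x02, as defined in
 * <linux/ata.h>): mv_crqb_pack_cmd(cw, 0x08, ATA_REG_NSECT, 0) stores
 *
 *	0x08 | (0x02 << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS = 0x1208
 *
 * i.e. the register value in bits 7:0, the register address in bits
 * 10:8, the CRQB_CMD_CS field, and no CRQB_CMD_LAST bit because more
 * register writes follow in the CRQB.
 */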
1266
1267 /**
1268 * mv_qc_prep - Host specific command preparation.
1269 * @qc: queued command to prepare
1270 *
1271 * This routine simply redirects to the general purpose routine
1272 * if command is not DMA. Else, it handles prep of the CRQB
1273 * (command request block), does some sanity checking, and calls
1274 * the SG load routine.
1275 *
1276 * LOCKING:
1277 * Inherited from caller.
1278 */
1279 static void mv_qc_prep(struct ata_queued_cmd *qc)
1280 {
1281 struct ata_port *ap = qc->ap;
1282 struct mv_port_priv *pp = ap->private_data;
1283 __le16 *cw;
1284 struct ata_taskfile *tf;
1285 u16 flags = 0;
1286 unsigned in_index;
1287
1288 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1289 (qc->tf.protocol != ATA_PROT_NCQ))
1290 return;
1291
1292 /* Fill in command request block
1293 */
1294 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1295 flags |= CRQB_FLAG_READ;
1296 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1297 flags |= qc->tag << CRQB_TAG_SHIFT;
1298
1299 /* get current queue index from software */
1300 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1301
1302 pp->crqb[in_index].sg_addr =
1303 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1304 pp->crqb[in_index].sg_addr_hi =
1305 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1306 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1307
1308 cw = &pp->crqb[in_index].ata_cmd[0];
1309 tf = &qc->tf;
1310
1311 /* Sadly, the CRQB cannot accommodate all registers--there are
1312 * only 11 command words...so we must pick and choose required
1313 * registers based on the command. So, we drop feature and
1314 * hob_feature for [RW] DMA commands, but they are needed for
1315 * NCQ. NCQ will drop hob_nsect.
1316 */
1317 switch (tf->command) {
1318 case ATA_CMD_READ:
1319 case ATA_CMD_READ_EXT:
1320 case ATA_CMD_WRITE:
1321 case ATA_CMD_WRITE_EXT:
1322 case ATA_CMD_WRITE_FUA_EXT:
1323 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1324 break;
1325 case ATA_CMD_FPDMA_READ:
1326 case ATA_CMD_FPDMA_WRITE:
1327 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1328 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1329 break;
1330 default:
1331 /* The only other commands EDMA supports in non-queued and
1332 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1333 * of which are defined/used by Linux. If we get here, this
1334 * driver needs work.
1335 *
1336 * FIXME: modify libata to give qc_prep a return value and
1337 * return error here.
1338 */
1339 BUG_ON(tf->command);
1340 break;
1341 }
1342 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1343 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1344 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1345 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1346 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1347 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1348 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1349 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1350 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1351
1352 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1353 return;
1354 mv_fill_sg(qc);
1355 }
1356
1357 /**
1358 * mv_qc_prep_iie - Host specific command preparation.
1359 * @qc: queued command to prepare
1360 *
1361 * This routine simply redirects to the general purpose routine
1362 * if command is not DMA. Else, it handles prep of the CRQB
1363 * (command request block), does some sanity checking, and calls
1364 * the SG load routine.
1365 *
1366 * LOCKING:
1367 * Inherited from caller.
1368 */
1369 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1370 {
1371 struct ata_port *ap = qc->ap;
1372 struct mv_port_priv *pp = ap->private_data;
1373 struct mv_crqb_iie *crqb;
1374 struct ata_taskfile *tf;
1375 unsigned in_index;
1376 u32 flags = 0;
1377
1378 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1379 (qc->tf.protocol != ATA_PROT_NCQ))
1380 return;
1381
1382 /* Fill in Gen IIE command request block
1383 */
1384 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1385 flags |= CRQB_FLAG_READ;
1386
1387 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1388 flags |= qc->tag << CRQB_TAG_SHIFT;
1389 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1390
1391 /* get current queue index from software */
1392 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1393
1394 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1395 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1396 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1397 crqb->flags = cpu_to_le32(flags);
1398
1399 tf = &qc->tf;
1400 crqb->ata_cmd[0] = cpu_to_le32(
1401 (tf->command << 16) |
1402 (tf->feature << 24)
1403 );
1404 crqb->ata_cmd[1] = cpu_to_le32(
1405 (tf->lbal << 0) |
1406 (tf->lbam << 8) |
1407 (tf->lbah << 16) |
1408 (tf->device << 24)
1409 );
1410 crqb->ata_cmd[2] = cpu_to_le32(
1411 (tf->hob_lbal << 0) |
1412 (tf->hob_lbam << 8) |
1413 (tf->hob_lbah << 16) |
1414 (tf->hob_feature << 24)
1415 );
1416 crqb->ata_cmd[3] = cpu_to_le32(
1417 (tf->nsect << 0) |
1418 (tf->hob_nsect << 8)
1419 );
1420
1421 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1422 return;
1423 mv_fill_sg(qc);
1424 }
1425
1426 /**
1427 * mv_qc_issue - Initiate a command to the host
1428 * @qc: queued command to start
1429 *
1430 * This routine simply redirects to the general purpose routine
1431 * if command is not DMA. Else, it sanity checks our local
1432 * caches of the request producer/consumer indices then enables
1433 * DMA and bumps the request producer index.
1434 *
1435 * LOCKING:
1436 * Inherited from caller.
1437 */
1438 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1439 {
1440 struct ata_port *ap = qc->ap;
1441 void __iomem *port_mmio = mv_ap_base(ap);
1442 struct mv_port_priv *pp = ap->private_data;
1443 u32 in_index;
1444
1445 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1446 (qc->tf.protocol != ATA_PROT_NCQ)) {
1447 /* We're about to send a non-EDMA capable command to the
1448 * port. Turn off EDMA so there won't be problems accessing
1449 * shadow block, etc registers.
1450 */
1451 __mv_stop_dma(ap);
1452 return ata_qc_issue_prot(qc);
1453 }
1454
1455 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
1456
1457 pp->req_idx++;
1458
1459 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1460
1461 /* and write the request in pointer to kick the EDMA to life */
1462 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1463 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1464
1465 return 0;
1466 }
1467
1468 /**
1469 * mv_err_intr - Handle error interrupts on the port
1470 * @ap: ATA channel to manipulate
1471 * @qc: affected queued command, or NULL if none is active
1472 *
1473 * In most cases, just clear the interrupt and move on. However,
1474 * some cases require an eDMA reset, which is done right before
1475 * the COMRESET in mv_phy_reset(). The SERR case requires a
1476 * clear of pending errors in the SATA SERROR register. Finally,
1477 * if the port disabled DMA, update our cached copy to match.
1478 *
1479 * LOCKING:
1480 * Inherited from caller.
1481 */
1482 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1483 {
1484 void __iomem *port_mmio = mv_ap_base(ap);
1485 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1486 struct mv_port_priv *pp = ap->private_data;
1487 struct mv_host_priv *hpriv = ap->host->private_data;
1488 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1489 unsigned int action = 0, err_mask = 0;
1490 struct ata_eh_info *ehi = &ap->link.eh_info;
1491
1492 ata_ehi_clear_desc(ehi);
1493
1494 if (!edma_enabled) {
1495 /* just a guess: do we need to do this? should we
1496 * expand this, and do it in all cases?
1497 */
1498 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1499 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1500 }
1501
1502 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1503
1504 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1505
1506 /*
1507 * all generations share these EDMA error cause bits
1508 */
1509
1510 if (edma_err_cause & EDMA_ERR_DEV)
1511 err_mask |= AC_ERR_DEV;
1512 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1513 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1514 EDMA_ERR_INTRL_PAR)) {
1515 err_mask |= AC_ERR_ATA_BUS;
1516 action |= ATA_EH_RESET;
1517 ata_ehi_push_desc(ehi, "parity error");
1518 }
1519 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1520 ata_ehi_hotplugged(ehi);
1521 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1522 "dev disconnect" : "dev connect");
1523 action |= ATA_EH_RESET;
1524 }
1525
1526 if (IS_GEN_I(hpriv)) {
1527 eh_freeze_mask = EDMA_EH_FREEZE_5;
1528
1529 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1530 pp = ap->private_data;
1531 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1532 ata_ehi_push_desc(ehi, "EDMA self-disable");
1533 }
1534 } else {
1535 eh_freeze_mask = EDMA_EH_FREEZE;
1536
1537 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1538 pp = ap->private_data;
1539 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1540 ata_ehi_push_desc(ehi, "EDMA self-disable");
1541 }
1542
1543 if (edma_err_cause & EDMA_ERR_SERR) {
1544 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1545 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1546 err_mask = AC_ERR_ATA_BUS;
1547 action |= ATA_EH_RESET;
1548 }
1549 }
1550
1551 /* Clear EDMA now that SERR cleanup done */
1552 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1553
1554 if (!err_mask) {
1555 err_mask = AC_ERR_OTHER;
1556 action |= ATA_EH_RESET;
1557 }
1558
1559 ehi->serror |= serr;
1560 ehi->action |= action;
1561
1562 if (qc)
1563 qc->err_mask |= err_mask;
1564 else
1565 ehi->err_mask |= err_mask;
1566
1567 if (edma_err_cause & eh_freeze_mask)
1568 ata_port_freeze(ap);
1569 else
1570 ata_port_abort(ap);
1571 }
1572
1573 static void mv_intr_pio(struct ata_port *ap)
1574 {
1575 struct ata_queued_cmd *qc;
1576 u8 ata_status;
1577
1578 /* ignore spurious intr if drive still BUSY */
1579 ata_status = readb(ap->ioaddr.status_addr);
1580 if (unlikely(ata_status & ATA_BUSY))
1581 return;
1582
1583 /* get active ATA command */
1584 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1585 if (unlikely(!qc)) /* no active tag */
1586 return;
1587 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1588 return;
1589
1590 /* and finally, complete the ATA command */
1591 qc->err_mask |= ac_err_mask(ata_status);
1592 ata_qc_complete(qc);
1593 }
1594
1595 static void mv_intr_edma(struct ata_port *ap)
1596 {
1597 void __iomem *port_mmio = mv_ap_base(ap);
1598 struct mv_host_priv *hpriv = ap->host->private_data;
1599 struct mv_port_priv *pp = ap->private_data;
1600 struct ata_queued_cmd *qc;
1601 u32 out_index, in_index;
1602 bool work_done = false;
1603
1604 /* get h/w response queue pointer */
1605 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1606 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1607
1608 while (1) {
1609 u16 status;
1610 unsigned int tag;
1611
1612 /* get s/w response queue last-read pointer, and compare */
1613 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1614 if (in_index == out_index)
1615 break;
1616
1617 /* 50xx: get active ATA command */
1618 if (IS_GEN_I(hpriv))
1619 tag = ap->link.active_tag;
1620
1621 /* Gen II/IIE: get active ATA command via tag, to enable
1622 * support for queueing. this works transparently for
1623 * queued and non-queued modes.
1624 */
1625 else
1626 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1627
1628 qc = ata_qc_from_tag(ap, tag);
1629
1630 /* For non-NCQ mode, the lower 8 bits of status
1631 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1632 * which should be zero if all went well.
1633 */
1634 status = le16_to_cpu(pp->crpb[out_index].flags);
1635 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1636 mv_err_intr(ap, qc);
1637 return;
1638 }
1639
1640 /* and finally, complete the ATA command */
1641 if (qc) {
1642 qc->err_mask |=
1643 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1644 ata_qc_complete(qc);
1645 }
1646
1647 /* advance software response queue pointer, to
1648 * indicate (after the loop completes) to hardware
1649 * that we have consumed a response queue entry.
1650 */
1651 work_done = true;
1652 pp->resp_idx++;
1653 }
1654
1655 if (work_done)
1656 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1657 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1658 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1659 }
1660
1661 /**
1662 * mv_host_intr - Handle all interrupts on the given host controller
1663 * @host: host specific structure
1664 * @relevant: port error bits relevant to this host controller
1665 * @hc: which host controller we're to look at
1666 *
1667 * Read then write clear the HC interrupt status then walk each
1668 * port connected to the HC and see if it needs servicing. Port
1669 * success ints are reported in the HC interrupt status reg, the
1670 * port error ints are reported in the higher level main
1671 * interrupt status register and thus are passed in via the
1672 * 'relevant' argument.
1673 *
1674 * LOCKING:
1675 * Inherited from caller.
1676 */
1677 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1678 {
1679 struct mv_host_priv *hpriv = host->private_data;
1680 void __iomem *mmio = hpriv->base;
1681 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1682 u32 hc_irq_cause;
1683 int port, port0, last_port;
1684
1685 if (hc == 0)
1686 port0 = 0;
1687 else
1688 port0 = MV_PORTS_PER_HC;
1689
1690 if (HAS_PCI(host))
1691 last_port = port0 + MV_PORTS_PER_HC;
1692 else
1693 last_port = port0 + hpriv->n_ports;
1694 /* we'll need the HC success int register in most cases */
1695 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1696 if (!hc_irq_cause)
1697 return;
1698
1699 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1700
1701 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1702 hc, relevant, hc_irq_cause);
1703
1704 for (port = port0; port < last_port; port++) {
1705 struct ata_port *ap = host->ports[port];
1706 struct mv_port_priv *pp;
1707 int have_err_bits, hard_port, shift;
1708
1709 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1710 continue;
1711
1712 pp = ap->private_data;
1713
1714 shift = port << 1; /* (port * 2) */
1715 if (port >= MV_PORTS_PER_HC) {
1716 shift++; /* skip bit 8 in the HC Main IRQ reg */
1717 }
1718 have_err_bits = ((PORT0_ERR << shift) & relevant);
1719
1720 if (unlikely(have_err_bits)) {
1721 struct ata_queued_cmd *qc;
1722
1723 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1724 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1725 continue;
1726
1727 mv_err_intr(ap, qc);
1728 continue;
1729 }
1730
1731 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1732
1733 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1734 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1735 mv_intr_edma(ap);
1736 } else {
1737 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1738 mv_intr_pio(ap);
1739 }
1740 }
1741 VPRINTK("EXIT\n");
1742 }
1743
1744 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1745 {
1746 struct mv_host_priv *hpriv = host->private_data;
1747 struct ata_port *ap;
1748 struct ata_queued_cmd *qc;
1749 struct ata_eh_info *ehi;
1750 unsigned int i, err_mask, printed = 0;
1751 u32 err_cause;
1752
1753 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1754
1755 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1756 err_cause);
1757
1758 DPRINTK("All regs @ PCI error\n");
1759 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1760
1761 writelfl(0, mmio + hpriv->irq_cause_ofs);
1762
1763 for (i = 0; i < host->n_ports; i++) {
1764 ap = host->ports[i];
1765 if (!ata_link_offline(&ap->link)) {
1766 ehi = &ap->link.eh_info;
1767 ata_ehi_clear_desc(ehi);
1768 if (!printed++)
1769 ata_ehi_push_desc(ehi,
1770 "PCI err cause 0x%08x", err_cause);
1771 err_mask = AC_ERR_HOST_BUS;
1772 ehi->action = ATA_EH_RESET;
1773 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1774 if (qc)
1775 qc->err_mask |= err_mask;
1776 else
1777 ehi->err_mask |= err_mask;
1778
1779 ata_port_freeze(ap);
1780 }
1781 }
1782 }
1783
1784 /**
1785 * mv_interrupt - Main interrupt event handler
1786 * @irq: unused
1787 * @dev_instance: private data; in this case the host structure
1788 *
1789 * Read the read only register to determine if any host
1790 * controllers have pending interrupts. If so, call lower level
1791 * routine to handle. Also check for PCI errors which are only
1792 * reported here.
1793 *
1794 * LOCKING:
1795 * This routine holds the host lock while processing pending
1796 * interrupts.
1797 */
1798 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1799 {
1800 struct ata_host *host = dev_instance;
1801 struct mv_host_priv *hpriv = host->private_data;
1802 unsigned int hc, handled = 0, n_hcs;
1803 void __iomem *mmio = hpriv->base;
1804 u32 irq_stat, irq_mask;
1805
1806 spin_lock(&host->lock);
1807
1808 irq_stat = readl(hpriv->main_cause_reg_addr);
1809 irq_mask = readl(hpriv->main_mask_reg_addr);
1810
1811 /* check the cases where we either have nothing pending or have read
1812 * a bogus register value which can indicate HW removal or PCI fault
1813 */
1814 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1815 goto out_unlock;
1816
1817 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1818
1819 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1820 mv_pci_error(host, mmio);
1821 handled = 1;
1822 goto out_unlock; /* skip all other HC irq handling */
1823 }
1824
1825 for (hc = 0; hc < n_hcs; hc++) {
1826 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1827 if (relevant) {
1828 mv_host_intr(host, relevant, hc);
1829 handled = 1;
1830 }
1831 }
1832
1833 out_unlock:
1834 spin_unlock(&host->lock);
1835
1836 return IRQ_RETVAL(handled);
1837 }
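/*
 * Worked example of the dispatch above: for hc == 1 the mask becomes
 * HC0_IRQ_PEND << HC_SHIFT, i.e. the second controller's slice of the
 * main cause register, and any set bit in that slice hands the whole
 * slice to mv_host_intr(host, relevant, 1) for per-port decoding.
 */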
1838
1839 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1840 {
1841 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1842 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1843
1844 return hc_mmio + ofs;
1845 }
1846
1847 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1848 {
1849 unsigned int ofs;
1850
1851 switch (sc_reg_in) {
1852 case SCR_STATUS:
1853 case SCR_ERROR:
1854 case SCR_CONTROL:
1855 ofs = sc_reg_in * sizeof(u32);
1856 break;
1857 default:
1858 ofs = 0xffffffffU;
1859 break;
1860 }
1861 return ofs;
1862 }
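/*
 * With the usual libata SCR numbering (SCR_STATUS = 0, SCR_ERROR = 1,
 * SCR_CONTROL = 2) this maps the three registers to offsets 0x0, 0x4 and
 * 0x8 inside the 50xx per-port PHY block; anything else is rejected by
 * the callers below via the 0xffffffffU sentinel.
 */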
1863
1864 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1865 {
1866 struct mv_host_priv *hpriv = ap->host->private_data;
1867 void __iomem *mmio = hpriv->base;
1868 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1869 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1870
1871 if (ofs != 0xffffffffU) {
1872 *val = readl(addr + ofs);
1873 return 0;
1874 } else
1875 return -EINVAL;
1876 }
1877
1878 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1879 {
1880 struct mv_host_priv *hpriv = ap->host->private_data;
1881 void __iomem *mmio = hpriv->base;
1882 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1883 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1884
1885 if (ofs != 0xffffffffU) {
1886 writelfl(val, addr + ofs);
1887 return 0;
1888 } else
1889 return -EINVAL;
1890 }
1891
1892 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1893 {
1894 struct pci_dev *pdev = to_pci_dev(host->dev);
1895 int early_5080;
1896
1897 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1898
1899 if (!early_5080) {
1900 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1901 tmp |= (1 << 0);
1902 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1903 }
1904
1905 mv_reset_pci_bus(host, mmio);
1906 }
1907
1908 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1909 {
1910 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1911 }
1912
1913 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1914 void __iomem *mmio)
1915 {
1916 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1917 u32 tmp;
1918
1919 tmp = readl(phy_mmio + MV5_PHY_MODE);
1920
1921 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1922 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1923 }
1924
1925 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1926 {
1927 u32 tmp;
1928
1929 writel(0, mmio + MV_GPIO_PORT_CTL);
1930
1931 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1932
1933 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1934 tmp |= ~(1 << 0);
1935 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1936 }
1937
1938 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1939 unsigned int port)
1940 {
1941 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1942 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1943 u32 tmp;
1944 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1945
1946 if (fix_apm_sq) {
1947 tmp = readl(phy_mmio + MV5_LT_MODE);
1948 tmp |= (1 << 19);
1949 writel(tmp, phy_mmio + MV5_LT_MODE);
1950
1951 tmp = readl(phy_mmio + MV5_PHY_CTL);
1952 tmp &= ~0x3;
1953 tmp |= 0x1;
1954 writel(tmp, phy_mmio + MV5_PHY_CTL);
1955 }
1956
1957 tmp = readl(phy_mmio + MV5_PHY_MODE);
1958 tmp &= ~mask;
1959 tmp |= hpriv->signal[port].pre;
1960 tmp |= hpriv->signal[port].amps;
1961 writel(tmp, phy_mmio + MV5_PHY_MODE);
1962 }
1963
1964
1965 #undef ZERO
1966 #define ZERO(reg) writel(0, port_mmio + (reg))
1967 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1968 unsigned int port)
1969 {
1970 void __iomem *port_mmio = mv_port_base(mmio, port);
1971
1972 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1973
1974 mv_channel_reset(hpriv, mmio, port);
1975
1976 ZERO(0x028); /* command */
1977 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1978 ZERO(0x004); /* timer */
1979 ZERO(0x008); /* irq err cause */
1980 ZERO(0x00c); /* irq err mask */
1981 ZERO(0x010); /* rq bah */
1982 ZERO(0x014); /* rq inp */
1983 ZERO(0x018); /* rq outp */
1984 ZERO(0x01c); /* respq bah */
1985 ZERO(0x024); /* respq outp */
1986 ZERO(0x020); /* respq inp */
1987 ZERO(0x02c); /* test control */
1988 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1989 }
1990 #undef ZERO
1991
1992 #define ZERO(reg) writel(0, hc_mmio + (reg))
1993 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1994 unsigned int hc)
1995 {
1996 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1997 u32 tmp;
1998
1999 ZERO(0x00c);
2000 ZERO(0x010);
2001 ZERO(0x014);
2002 ZERO(0x018);
2003
2004 tmp = readl(hc_mmio + 0x20);
2005 tmp &= 0x1c1c1c1c;
2006 tmp |= 0x03030303;
2007 writel(tmp, hc_mmio + 0x20);
2008 }
2009 #undef ZERO
2010
2011 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2012 unsigned int n_hc)
2013 {
2014 unsigned int hc, port;
2015
2016 for (hc = 0; hc < n_hc; hc++) {
2017 for (port = 0; port < MV_PORTS_PER_HC; port++)
2018 mv5_reset_hc_port(hpriv, mmio,
2019 (hc * MV_PORTS_PER_HC) + port);
2020
2021 mv5_reset_one_hc(hpriv, mmio, hc);
2022 }
2023
2024 return 0;
2025 }
2026
2027 #undef ZERO
2028 #define ZERO(reg) writel(0, mmio + (reg))
2029 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2030 {
2031 struct mv_host_priv *hpriv = host->private_data;
2032 u32 tmp;
2033
2034 tmp = readl(mmio + MV_PCI_MODE);
2035 tmp &= 0xff00ffff;
2036 writel(tmp, mmio + MV_PCI_MODE);
2037
2038 ZERO(MV_PCI_DISC_TIMER);
2039 ZERO(MV_PCI_MSI_TRIGGER);
2040 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2041 ZERO(HC_MAIN_IRQ_MASK_OFS);
2042 ZERO(MV_PCI_SERR_MASK);
2043 ZERO(hpriv->irq_cause_ofs);
2044 ZERO(hpriv->irq_mask_ofs);
2045 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2046 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2047 ZERO(MV_PCI_ERR_ATTRIBUTE);
2048 ZERO(MV_PCI_ERR_COMMAND);
2049 }
2050 #undef ZERO
2051
2052 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2053 {
2054 u32 tmp;
2055
2056 mv5_reset_flash(hpriv, mmio);
2057
2058 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2059 tmp &= 0x3;
2060 tmp |= (1 << 5) | (1 << 6);
2061 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2062 }
2063
2064 /**
2065 * mv6_reset_hc - Perform the 6xxx global soft reset
2066 * @mmio: base address of the HBA
2067 *
2068 * This routine only applies to 6xxx parts.
2069 *
2070 * LOCKING:
2071 * Inherited from caller.
2072 */
2073 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2074 unsigned int n_hc)
2075 {
2076 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2077 int i, rc = 0;
2078 u32 t;
2079
2080 /* Follow the procedure defined in the PCI "main command and status
2081 * register" table.
2082 */
2083 t = readl(reg);
2084 writel(t | STOP_PCI_MASTER, reg);
2085
2086 for (i = 0; i < 1000; i++) {
2087 udelay(1);
2088 t = readl(reg);
2089 if (PCI_MASTER_EMPTY & t)
2090 break;
2091 }
2092 if (!(PCI_MASTER_EMPTY & t)) {
2093 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2094 rc = 1;
2095 goto done;
2096 }
2097
2098 /* set reset */
2099 i = 5;
2100 do {
2101 writel(t | GLOB_SFT_RST, reg);
2102 t = readl(reg);
2103 udelay(1);
2104 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2105
2106 if (!(GLOB_SFT_RST & t)) {
2107 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2108 rc = 1;
2109 goto done;
2110 }
2111
2112 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2113 i = 5;
2114 do {
2115 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2116 t = readl(reg);
2117 udelay(1);
2118 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2119
2120 if (GLOB_SFT_RST & t) {
2121 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2122 rc = 1;
2123 }
2124 done:
2125 return rc;
2126 }
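/*
 * Summary of the sequence above: stop the PCI master and busy-wait
 * (bounded) for it to drain, assert the global soft reset bit with a
 * short retry loop, then clear it again while re-enabling the master.
 * Each step polls with udelay() and reports an error via printk if the
 * register never reaches the expected state.
 */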
2127
2128 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2129 void __iomem *mmio)
2130 {
2131 void __iomem *port_mmio;
2132 u32 tmp;
2133
2134 tmp = readl(mmio + MV_RESET_CFG);
2135 if ((tmp & (1 << 0)) == 0) {
2136 hpriv->signal[idx].amps = 0x7 << 8;
2137 hpriv->signal[idx].pre = 0x1 << 5;
2138 return;
2139 }
2140
2141 port_mmio = mv_port_base(mmio, idx);
2142 tmp = readl(port_mmio + PHY_MODE2);
2143
2144 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2145 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2146 }
2147
2148 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2149 {
2150 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2151 }
2152
2153 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2154 unsigned int port)
2155 {
2156 void __iomem *port_mmio = mv_port_base(mmio, port);
2157
2158 u32 hp_flags = hpriv->hp_flags;
2159 int fix_phy_mode2 =
2160 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2161 int fix_phy_mode4 =
2162 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2163 u32 m2, tmp;
2164
2165 if (fix_phy_mode2) {
2166 m2 = readl(port_mmio + PHY_MODE2);
2167 m2 &= ~(1 << 16);
2168 m2 |= (1 << 31);
2169 writel(m2, port_mmio + PHY_MODE2);
2170
2171 udelay(200);
2172
2173 m2 = readl(port_mmio + PHY_MODE2);
2174 m2 &= ~((1 << 16) | (1 << 31));
2175 writel(m2, port_mmio + PHY_MODE2);
2176
2177 udelay(200);
2178 }
2179
2180 /* who knows what this magic does */
2181 tmp = readl(port_mmio + PHY_MODE3);
2182 tmp &= ~0x7F800000;
2183 tmp |= 0x2A800000;
2184 writel(tmp, port_mmio + PHY_MODE3);
2185
2186 if (fix_phy_mode4) {
2187 u32 m4;
2188
2189 m4 = readl(port_mmio + PHY_MODE4);
2190
2191 if (hp_flags & MV_HP_ERRATA_60X1B2)
2192 tmp = readl(port_mmio + 0x310);
2193
2194 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2195
2196 writel(m4, port_mmio + PHY_MODE4);
2197
2198 if (hp_flags & MV_HP_ERRATA_60X1B2)
2199 writel(tmp, port_mmio + 0x310);
2200 }
2201
2202 /* Revert values of pre-emphasis and signal amps to the saved ones */
2203 m2 = readl(port_mmio + PHY_MODE2);
2204
2205 m2 &= ~MV_M2_PREAMP_MASK;
2206 m2 |= hpriv->signal[port].amps;
2207 m2 |= hpriv->signal[port].pre;
2208 m2 &= ~(1 << 16);
2209
2210 /* according to mvSata 3.6.1, some IIE values are fixed */
2211 if (IS_GEN_IIE(hpriv)) {
2212 m2 &= ~0xC30FF01F;
2213 m2 |= 0x0000900F;
2214 }
2215
2216 writel(m2, port_mmio + PHY_MODE2);
2217 }
2218
2219 /* TODO: use the generic LED interface to configure the SATA Presence */
2220 /* & Activity LEDs on the board */
2221 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2222 void __iomem *mmio)
2223 {
2224 return;
2225 }
2226
2227 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2228 void __iomem *mmio)
2229 {
2230 void __iomem *port_mmio;
2231 u32 tmp;
2232
2233 port_mmio = mv_port_base(mmio, idx);
2234 tmp = readl(port_mmio + PHY_MODE2);
2235
2236 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2237 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2238 }
2239
2240 #undef ZERO
2241 #define ZERO(reg) writel(0, port_mmio + (reg))
2242 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2243 void __iomem *mmio, unsigned int port)
2244 {
2245 void __iomem *port_mmio = mv_port_base(mmio, port);
2246
2247 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2248
2249 mv_channel_reset(hpriv, mmio, port);
2250
2251 ZERO(0x028); /* command */
2252 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2253 ZERO(0x004); /* timer */
2254 ZERO(0x008); /* irq err cause */
2255 ZERO(0x00c); /* irq err mask */
2256 ZERO(0x010); /* rq bah */
2257 ZERO(0x014); /* rq inp */
2258 ZERO(0x018); /* rq outp */
2259 ZERO(0x01c); /* respq bah */
2260 ZERO(0x024); /* respq outp */
2261 ZERO(0x020); /* respq inp */
2262 ZERO(0x02c); /* test control */
2263 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2264 }
2265
2266 #undef ZERO
2267
2268 #define ZERO(reg) writel(0, hc_mmio + (reg))
2269 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2270 void __iomem *mmio)
2271 {
2272 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
2273
2274 ZERO(0x00c);
2275 ZERO(0x010);
2276 ZERO(0x014);
2277
2278 }
2279
2280 #undef ZERO
2281
2282 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2283 void __iomem *mmio, unsigned int n_hc)
2284 {
2285 unsigned int port;
2286
2287 for (port = 0; port < hpriv->n_ports; port++)
2288 mv_soc_reset_hc_port(hpriv, mmio, port);
2289
2290 mv_soc_reset_one_hc(hpriv, mmio);
2291
2292 return 0;
2293 }
2294
2295 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2296 void __iomem *mmio)
2297 {
2298 return;
2299 }
2300
2301 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2302 {
2303 return;
2304 }
2305
2306 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2307 unsigned int port_no)
2308 {
2309 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2310
2311 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2312
2313 if (IS_GEN_II(hpriv)) {
2314 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2315 ifctl |= (1 << 7); /* enable gen2i speed */
2316 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2317 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2318 }
2319
2320 udelay(25); /* allow reset propagation */
2321
2322 /* Spec never mentions clearing the bit. Marvell's driver does
2323 * clear the bit, however.
2324 */
2325 writelfl(0, port_mmio + EDMA_CMD_OFS);
2326
2327 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2328
2329 if (IS_GEN_I(hpriv))
2330 mdelay(1);
2331 }
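/*
 * The gen2i interface-control setup above duplicates the per-port code
 * in mv_init_host(); presumably it is repeated here so the setting is
 * restored after a channel reset.
 */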
2332
2333 /**
2334 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2335 * @ap: ATA channel to manipulate
2336 *
2337 * Part of this is taken from __sata_phy_reset and modified to
2338 * not sleep since this routine gets called from interrupt level.
2339 *
2340 * LOCKING:
2341 * Inherited from caller. This is coded to be safe to call at
2342 * interrupt level, i.e. it does not sleep.
2343 */
2344 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2345 unsigned long deadline)
2346 {
2347 struct mv_port_priv *pp = ap->private_data;
2348 struct mv_host_priv *hpriv = ap->host->private_data;
2349 void __iomem *port_mmio = mv_ap_base(ap);
2350 int retry = 5;
2351 u32 sstatus;
2352
2353 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2354
2355 #ifdef DEBUG
2356 {
2357 u32 sstatus, serror, scontrol;
2358
2359 mv_scr_read(ap, SCR_STATUS, &sstatus);
2360 mv_scr_read(ap, SCR_ERROR, &serror);
2361 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2362 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2363 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2364 }
2365 #endif
2366
2367 /* Issue COMRESET via SControl */
2368 comreset_retry:
2369 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2370 msleep(1);
2371
2372 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2373 msleep(20);
2374
2375 do {
2376 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2377 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2378 break;
2379
2380 msleep(1);
2381 } while (time_before(jiffies, deadline));
2382
2383 /* work around errata */
2384 if (IS_GEN_II(hpriv) &&
2385 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2386 (retry-- > 0))
2387 goto comreset_retry;
2388
2389 #ifdef DEBUG
2390 {
2391 u32 sstatus, serror, scontrol;
2392
2393 mv_scr_read(ap, SCR_STATUS, &sstatus);
2394 mv_scr_read(ap, SCR_ERROR, &serror);
2395 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2396 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2397 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2398 }
2399 #endif
2400
2401 if (ata_link_offline(&ap->link)) {
2402 *class = ATA_DEV_NONE;
2403 return;
2404 }
2405
2406 /* even after SStatus reflects that the device is ready,
2407 * it seems to take a while for the link to be fully
2408 * established (and thus Status is no longer 0x80/0x7F),
2409 * so we poll a bit for that here.
2410 */
2411 retry = 20;
2412 while (1) {
2413 u8 drv_stat = ata_check_status(ap);
2414 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2415 break;
2416 msleep(500);
2417 if (retry-- <= 0)
2418 break;
2419 if (time_after(jiffies, deadline))
2420 break;
2421 }
2422
2423 /* FIXME: if we passed the deadline, the following
2424 * code probably produces an invalid result
2425 */
2426
2427 /* finally, read device signature from TF registers */
2428 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2429
2430 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2431
2432 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2433
2434 VPRINTK("EXIT\n");
2435 }
2436
2437 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2438 {
2439 struct ata_port *ap = link->ap;
2440 struct mv_port_priv *pp = ap->private_data;
2441
2442 mv_stop_dma(ap);
2443
2444 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
2445 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2446
2447 return 0;
2448 }
2449
2450 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2451 unsigned long deadline)
2452 {
2453 struct ata_port *ap = link->ap;
2454 struct mv_host_priv *hpriv = ap->host->private_data;
2455 void __iomem *mmio = hpriv->base;
2456
2457 mv_stop_dma(ap);
2458
2459 mv_channel_reset(hpriv, mmio, ap->port_no);
2460
2461 mv_phy_reset(ap, class, deadline);
2462
2463 return 0;
2464 }
2465
2466 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2467 {
2468 struct ata_port *ap = link->ap;
2469 u32 serr;
2470
2471 /* print link status */
2472 sata_print_link_status(link);
2473
2474 /* clear SError */
2475 sata_scr_read(link, SCR_ERROR, &serr);
2476 sata_scr_write_flush(link, SCR_ERROR, serr);
2477
2478 /* bail out if no device is present */
2479 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2480 DPRINTK("EXIT, no device\n");
2481 return;
2482 }
2483
2484 /* set up device control */
2485 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2486 }
2487
2488 static void mv_error_handler(struct ata_port *ap)
2489 {
2490 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2491 mv_hardreset, mv_postreset);
2492 }
2493
2494 static void mv_eh_freeze(struct ata_port *ap)
2495 {
2496 struct mv_host_priv *hpriv = ap->host->private_data;
2497 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2498 u32 tmp, mask;
2499 unsigned int shift;
2500
2501 /* FIXME: handle coalescing completion events properly */
2502
2503 shift = ap->port_no * 2;
2504 if (hc > 0)
2505 shift++;
2506
2507 mask = 0x3 << shift;
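/*
 * Worked example: for port 5 this gives shift = 5 * 2 + 1 = 11, so
 * mask = 0x3 << 11 covers that port's err and done bits in the main
 * mask register.
 */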
2508
2509 /* disable assertion of portN err, done events */
2510 tmp = readl(hpriv->main_mask_reg_addr);
2511 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2512 }
2513
2514 static void mv_eh_thaw(struct ata_port *ap)
2515 {
2516 struct mv_host_priv *hpriv = ap->host->private_data;
2517 void __iomem *mmio = hpriv->base;
2518 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2519 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2520 void __iomem *port_mmio = mv_ap_base(ap);
2521 u32 tmp, mask, hc_irq_cause;
2522 unsigned int shift, hc_port_no = ap->port_no;
2523
2524 /* FIXME: handle coalescing completion events properly */
2525
2526 shift = ap->port_no * 2;
2527 if (hc > 0) {
2528 shift++;
2529 hc_port_no -= 4;
2530 }
2531
2532 mask = 0x3 << shift;
2533
2534 /* clear EDMA errors on this port */
2535 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2536
2537 /* clear pending irq events */
2538 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2539 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2540 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2541 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2542
2543 /* enable assertion of portN err, done events */
2544 tmp = readl(hpriv->main_mask_reg_addr);
2545 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2546 }
2547
2548 /**
2549 * mv_port_init - Perform some early initialization on a single port.
2550 * @port: libata data structure storing shadow register addresses
2551 * @port_mmio: base address of the port
2552 *
2553 * Initialize shadow register mmio addresses, clear outstanding
2554 * interrupts on the port, and unmask interrupts for the future
2555 * start of the port.
2556 *
2557 * LOCKING:
2558 * Inherited from caller.
2559 */
2560 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2561 {
2562 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2563 unsigned serr_ofs;
2564
2565 /* PIO related setup
2566 */
2567 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2568 port->error_addr =
2569 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2570 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2571 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2572 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2573 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2574 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2575 port->status_addr =
2576 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2577 /* special case: control/altstatus doesn't have ATA_REG_ address */
2578 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2579
2580 /* unused: */
2581 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2582
2583 /* Clear any currently outstanding port interrupt conditions */
2584 serr_ofs = mv_scr_offset(SCR_ERROR);
2585 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2586 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2587
2588 /* unmask all non-transient EDMA error interrupts */
2589 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2590
2591 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2592 readl(port_mmio + EDMA_CFG_OFS),
2593 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2594 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2595 }
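/*
 * With the standard ATA_REG_* numbering this lays the shadow registers
 * out as one 32-bit slot per taskfile register: data at shd_base + 0x00,
 * error/feature at +0x04, and so on up to status/command at +0x1c, with
 * control/altstatus in its own slot at SHD_CTL_AST_OFS.
 */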
2596
2597 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2598 {
2599 struct pci_dev *pdev = to_pci_dev(host->dev);
2600 struct mv_host_priv *hpriv = host->private_data;
2601 u32 hp_flags = hpriv->hp_flags;
2602
2603 switch (board_idx) {
2604 case chip_5080:
2605 hpriv->ops = &mv5xxx_ops;
2606 hp_flags |= MV_HP_GEN_I;
2607
2608 switch (pdev->revision) {
2609 case 0x1:
2610 hp_flags |= MV_HP_ERRATA_50XXB0;
2611 break;
2612 case 0x3:
2613 hp_flags |= MV_HP_ERRATA_50XXB2;
2614 break;
2615 default:
2616 dev_printk(KERN_WARNING, &pdev->dev,
2617 "Applying 50XXB2 workarounds to unknown rev\n");
2618 hp_flags |= MV_HP_ERRATA_50XXB2;
2619 break;
2620 }
2621 break;
2622
2623 case chip_504x:
2624 case chip_508x:
2625 hpriv->ops = &mv5xxx_ops;
2626 hp_flags |= MV_HP_GEN_I;
2627
2628 switch (pdev->revision) {
2629 case 0x0:
2630 hp_flags |= MV_HP_ERRATA_50XXB0;
2631 break;
2632 case 0x3:
2633 hp_flags |= MV_HP_ERRATA_50XXB2;
2634 break;
2635 default:
2636 dev_printk(KERN_WARNING, &pdev->dev,
2637 "Applying B2 workarounds to unknown rev\n");
2638 hp_flags |= MV_HP_ERRATA_50XXB2;
2639 break;
2640 }
2641 break;
2642
2643 case chip_604x:
2644 case chip_608x:
2645 hpriv->ops = &mv6xxx_ops;
2646 hp_flags |= MV_HP_GEN_II;
2647
2648 switch (pdev->revision) {
2649 case 0x7:
2650 hp_flags |= MV_HP_ERRATA_60X1B2;
2651 break;
2652 case 0x9:
2653 hp_flags |= MV_HP_ERRATA_60X1C0;
2654 break;
2655 default:
2656 dev_printk(KERN_WARNING, &pdev->dev,
2657 "Applying B2 workarounds to unknown rev\n");
2658 hp_flags |= MV_HP_ERRATA_60X1B2;
2659 break;
2660 }
2661 break;
2662
2663 case chip_7042:
2664 hp_flags |= MV_HP_PCIE;
2665 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2666 (pdev->device == 0x2300 || pdev->device == 0x2310))
2667 {
2668 /*
2669 * Highpoint RocketRAID PCIe 23xx series cards:
2670 *
2671 * Unconfigured drives are treated as "Legacy"
2672 * by the BIOS, and it overwrites sector 8 with
2673 * a "Lgcy" metadata block prior to Linux boot.
2674 *
2675 * Configured drives (RAID or JBOD) leave sector 8
2676 * alone, but instead overwrite a high numbered
2677 * sector for the RAID metadata. This sector can
2678 * be determined exactly, by truncating the physical
2679 * drive capacity to a nice even GB value.
2680 *
2681 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2682 *
2683 * Warn the user, lest they think we're just buggy.
2684 */
2685 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2686 " BIOS CORRUPTS DATA on all attached drives,"
2687 " regardless of if/how they are configured."
2688 " BEWARE!\n");
2689 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2690 " use sectors 8-9 on \"Legacy\" drives,"
2691 " and avoid the final two gigabytes on"
2692 " all RocketRAID BIOS initialized drives.\n");
2693 }
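/* fall through: chip_7042 otherwise shares the chip_6042 setup below */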
2694 case chip_6042:
2695 hpriv->ops = &mv6xxx_ops;
2696 hp_flags |= MV_HP_GEN_IIE;
2697
2698 switch (pdev->revision) {
2699 case 0x0:
2700 hp_flags |= MV_HP_ERRATA_XX42A0;
2701 break;
2702 case 0x1:
2703 hp_flags |= MV_HP_ERRATA_60X1C0;
2704 break;
2705 default:
2706 dev_printk(KERN_WARNING, &pdev->dev,
2707 "Applying 60X1C0 workarounds to unknown rev\n");
2708 hp_flags |= MV_HP_ERRATA_60X1C0;
2709 break;
2710 }
2711 break;
2712 case chip_soc:
2713 hpriv->ops = &mv_soc_ops;
2714 hp_flags |= MV_HP_ERRATA_60X1C0;
2715 break;
2716
2717 default:
2718 dev_printk(KERN_ERR, host->dev,
2719 "BUG: invalid board index %u\n", board_idx);
2720 return 1;
2721 }
2722
2723 hpriv->hp_flags = hp_flags;
2724 if (hp_flags & MV_HP_PCIE) {
2725 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2726 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2727 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2728 } else {
2729 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2730 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2731 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2732 }
2733
2734 return 0;
2735 }
2736
2737 /**
2738 * mv_init_host - Perform some early initialization of the host.
2739 * @host: ATA host to initialize
2740 * @board_idx: controller index
2741 *
2742 * If possible, do an early global reset of the host. Then do
2743 * our port init and clear/unmask all/relevant host interrupts.
2744 *
2745 * LOCKING:
2746 * Inherited from caller.
2747 */
2748 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2749 {
2750 int rc = 0, n_hc, port, hc;
2751 struct mv_host_priv *hpriv = host->private_data;
2752 void __iomem *mmio = hpriv->base;
2753
2754 rc = mv_chip_id(host, board_idx);
2755 if (rc)
2756 goto done;
2757
2758 if (HAS_PCI(host)) {
2759 hpriv->main_cause_reg_addr = hpriv->base +
2760 HC_MAIN_IRQ_CAUSE_OFS;
2761 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2762 } else {
2763 hpriv->main_cause_reg_addr = hpriv->base +
2764 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2765 hpriv->main_mask_reg_addr = hpriv->base +
2766 HC_SOC_MAIN_IRQ_MASK_OFS;
2767 }
2768 /* global interrupt mask */
2769 writel(0, hpriv->main_mask_reg_addr);
2770
2771 n_hc = mv_get_hc_count(host->ports[0]->flags);
2772
2773 for (port = 0; port < host->n_ports; port++)
2774 hpriv->ops->read_preamp(hpriv, port, mmio);
2775
2776 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2777 if (rc)
2778 goto done;
2779
2780 hpriv->ops->reset_flash(hpriv, mmio);
2781 hpriv->ops->reset_bus(host, mmio);
2782 hpriv->ops->enable_leds(hpriv, mmio);
2783
2784 for (port = 0; port < host->n_ports; port++) {
2785 if (IS_GEN_II(hpriv)) {
2786 void __iomem *port_mmio = mv_port_base(mmio, port);
2787
2788 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2789 ifctl |= (1 << 7); /* enable gen2i speed */
2790 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2791 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2792 }
2793
2794 hpriv->ops->phy_errata(hpriv, mmio, port);
2795 }
2796
2797 for (port = 0; port < host->n_ports; port++) {
2798 struct ata_port *ap = host->ports[port];
2799 void __iomem *port_mmio = mv_port_base(mmio, port);
2800
2801 mv_port_init(&ap->ioaddr, port_mmio);
2802
2803 #ifdef CONFIG_PCI
2804 if (HAS_PCI(host)) {
2805 unsigned int offset = port_mmio - mmio;
2806 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2807 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2808 }
2809 #endif
2810 }
2811
2812 for (hc = 0; hc < n_hc; hc++) {
2813 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2814
2815 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2816 "(before clear)=0x%08x\n", hc,
2817 readl(hc_mmio + HC_CFG_OFS),
2818 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2819
2820 /* Clear any currently outstanding hc interrupt conditions */
2821 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2822 }
2823
2824 if (HAS_PCI(host)) {
2825 /* Clear any currently outstanding host interrupt conditions */
2826 writelfl(0, mmio + hpriv->irq_cause_ofs);
2827
2828 /* and unmask interrupt generation for host regs */
2829 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2830 if (IS_GEN_I(hpriv))
2831 writelfl(~HC_MAIN_MASKED_IRQS_5,
2832 hpriv->main_mask_reg_addr);
2833 else
2834 writelfl(~HC_MAIN_MASKED_IRQS,
2835 hpriv->main_mask_reg_addr);
2836
2837 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2838 "PCI int cause/mask=0x%08x/0x%08x\n",
2839 readl(hpriv->main_cause_reg_addr),
2840 readl(hpriv->main_mask_reg_addr),
2841 readl(mmio + hpriv->irq_cause_ofs),
2842 readl(mmio + hpriv->irq_mask_ofs));
2843 } else {
2844 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2845 hpriv->main_mask_reg_addr);
2846 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2847 readl(hpriv->main_cause_reg_addr),
2848 readl(hpriv->main_mask_reg_addr));
2849 }
2850 done:
2851 return rc;
2852 }
2853
2854 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2855 {
2856 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2857 MV_CRQB_Q_SZ, 0);
2858 if (!hpriv->crqb_pool)
2859 return -ENOMEM;
2860
2861 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2862 MV_CRPB_Q_SZ, 0);
2863 if (!hpriv->crpb_pool)
2864 return -ENOMEM;
2865
2866 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2867 MV_SG_TBL_SZ, 0);
2868 if (!hpriv->sg_tbl_pool)
2869 return -ENOMEM;
2870
2871 return 0;
2872 }
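/*
 * Note: these are device-managed (dmam_) pools, so the early -ENOMEM
 * returns above are safe; any pool that was created is released
 * automatically when the device goes away.
 */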
2873
2874 /**
2875 * mv_platform_probe - handle a positive probe of an SoC Marvell
2876 * host
2877 * @pdev: platform device found
2878 *
2879 * LOCKING:
2880 * Inherited from caller.
2881 */
2882 static int mv_platform_probe(struct platform_device *pdev)
2883 {
2884 static int printed_version;
2885 const struct mv_sata_platform_data *mv_platform_data;
2886 const struct ata_port_info *ppi[] =
2887 { &mv_port_info[chip_soc], NULL };
2888 struct ata_host *host;
2889 struct mv_host_priv *hpriv;
2890 struct resource *res;
2891 int n_ports, rc;
2892
2893 if (!printed_version++)
2894 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2895
2896 /*
2897 * Simple resource validation ..
2898 */
2899 if (unlikely(pdev->num_resources != 2)) {
2900 dev_err(&pdev->dev, "invalid number of resources\n");
2901 return -EINVAL;
2902 }
2903
2904 /*
2905 * Get the register base first
2906 */
2907 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2908 if (res == NULL)
2909 return -EINVAL;
2910
2911 /* allocate host */
2912 mv_platform_data = pdev->dev.platform_data;
2913 n_ports = mv_platform_data->n_ports;
2914
2915 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2916 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2917
2918 if (!host || !hpriv)
2919 return -ENOMEM;
2920 host->private_data = hpriv;
2921 hpriv->n_ports = n_ports;
2922
2923 host->iomap = NULL;
2924 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2925 res->end - res->start + 1);
if (hpriv->base == NULL)
return -ENOMEM;
2926 hpriv->base -= MV_SATAHC0_REG_BASE;
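/*
 * The subtraction above biases the mapped base so that the whole-chip
 * offsets used by mv_port_base() and friends still line up; this assumes
 * the platform resource describes the SATAHC0 register block rather than
 * the start of the chip's register space (inferred from the offset math,
 * not stated in the original source).
 */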
2927
2928 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2929 if (rc)
2930 return rc;
2931
2932 /* initialize adapter */
2933 rc = mv_init_host(host, chip_soc);
2934 if (rc)
2935 return rc;
2936
2937 dev_printk(KERN_INFO, &pdev->dev,
2938 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2939 host->n_ports);
2940
2941 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2942 IRQF_SHARED, &mv6_sht);
2943 }
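/*
 * Minimal sketch of how a board file might wire this driver up (kept in
 * an #if 0 block because it does not belong in this file).  The MMIO
 * window, IRQ number and device id below are placeholders for
 * illustration only, not values taken from any real board.
 */
#if 0
static struct mv_sata_platform_data example_sata_data = {
	.n_ports	= 2,	/* consumed above via pdev->dev.platform_data */
};

static struct resource example_sata_resources[] = {
	{
		.start	= 0xf1080000,	/* placeholder register base */
		.end	= 0xf1087fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 21,		/* placeholder IRQ line */
		.end	= 21,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_sata_device = {
	.name		= DRV_NAME,	/* must match for this driver to bind */
	.id		= 0,
	.dev		= {
		.platform_data	= &example_sata_data,
	},
	.resource	= example_sata_resources,
	.num_resources	= ARRAY_SIZE(example_sata_resources),
};

/* registered from board setup code: platform_device_register(&example_sata_device); */
#endif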
2944
2945 /*
2946 *
2947 * mv_platform_remove - unplug a platform interface
2948 * @pdev: platform device
2949 *
2950 * A platform bus SATA device has been unplugged. Perform the needed
2951 * cleanup. Also called on module unload for any active devices.
2952 */
2953 static int __devexit mv_platform_remove(struct platform_device *pdev)
2954 {
2955 struct device *dev = &pdev->dev;
2956 struct ata_host *host = dev_get_drvdata(dev);
2957
2958 ata_host_detach(host);
2959 return 0;
2960 }
2961
2962 static struct platform_driver mv_platform_driver = {
2963 .probe = mv_platform_probe,
2964 .remove = __devexit_p(mv_platform_remove),
2965 .driver = {
2966 .name = DRV_NAME,
2967 .owner = THIS_MODULE,
2968 },
2969 };
2970
2971
2972 #ifdef CONFIG_PCI
2973 static int mv_pci_init_one(struct pci_dev *pdev,
2974 const struct pci_device_id *ent);
2975
2976
2977 static struct pci_driver mv_pci_driver = {
2978 .name = DRV_NAME,
2979 .id_table = mv_pci_tbl,
2980 .probe = mv_pci_init_one,
2981 .remove = ata_pci_remove_one,
2982 };
2983
2984 /*
2985 * module options
2986 */
2987 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2988
2989
2990 /* move to PCI layer or libata core? */
2991 static int pci_go_64(struct pci_dev *pdev)
2992 {
2993 int rc;
2994
2995 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2996 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2997 if (rc) {
2998 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2999 if (rc) {
3000 dev_printk(KERN_ERR, &pdev->dev,
3001 "64-bit DMA enable failed\n");
3002 return rc;
3003 }
3004 }
3005 } else {
3006 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3007 if (rc) {
3008 dev_printk(KERN_ERR, &pdev->dev,
3009 "32-bit DMA enable failed\n");
3010 return rc;
3011 }
3012 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3013 if (rc) {
3014 dev_printk(KERN_ERR, &pdev->dev,
3015 "32-bit consistent DMA enable failed\n");
3016 return rc;
3017 }
3018 }
3019
3020 return rc;
3021 }
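/*
 * The ladder above tries the 64-bit streaming mask first; if that sticks
 * it also requests a 64-bit consistent mask, falling back to a 32-bit
 * consistent mask if necessary.  Otherwise everything is set to 32-bit.
 * Any hard failure is logged and returned to the probe path.
 */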
3022
3023 /**
3024 * mv_print_info - Dump key info to kernel log for perusal.
3025 * @host: ATA host to print info about
3026 *
3027 * FIXME: complete this.
3028 *
3029 * LOCKING:
3030 * Inherited from caller.
3031 */
3032 static void mv_print_info(struct ata_host *host)
3033 {
3034 struct pci_dev *pdev = to_pci_dev(host->dev);
3035 struct mv_host_priv *hpriv = host->private_data;
3036 u8 scc;
3037 const char *scc_s, *gen;
3038
3039 /* Read the PCI device class byte so we can report whether the
3040 * controller presents itself in SCSI or RAID mode
3041 */
3042 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3043 if (scc == 0)
3044 scc_s = "SCSI";
3045 else if (scc == 0x01)
3046 scc_s = "RAID";
3047 else
3048 scc_s = "?";
3049
3050 if (IS_GEN_I(hpriv))
3051 gen = "I";
3052 else if (IS_GEN_II(hpriv))
3053 gen = "II";
3054 else if (IS_GEN_IIE(hpriv))
3055 gen = "IIE";
3056 else
3057 gen = "?";
3058
3059 dev_printk(KERN_INFO, &pdev->dev,
3060 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3061 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
3062 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3063 }
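/*
 * Example of the resulting log line (illustrative only; the queue depth,
 * port count and device address depend on MV_MAX_Q_DEPTH, the chip and
 * the slot):
 *
 *   sata_mv 0000:02:00.0: Gen-II 32 slots 4 ports SCSI mode IRQ via INTx
 */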
3064
3065 /**
3066 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
3067 * @pdev: PCI device found
3068 * @ent: PCI device ID entry for the matched host
3069 *
3070 * LOCKING:
3071 * Inherited from caller.
3072 */
3073 static int mv_pci_init_one(struct pci_dev *pdev,
3074 const struct pci_device_id *ent)
3075 {
3076 static int printed_version;
3077 unsigned int board_idx = (unsigned int)ent->driver_data;
3078 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3079 struct ata_host *host;
3080 struct mv_host_priv *hpriv;
3081 int n_ports, rc;
3082
3083 if (!printed_version++)
3084 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3085
3086 /* allocate host */
3087 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3088
3089 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3090 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3091 if (!host || !hpriv)
3092 return -ENOMEM;
3093 host->private_data = hpriv;
3094 hpriv->n_ports = n_ports;
3095
3096 /* acquire resources */
3097 rc = pcim_enable_device(pdev);
3098 if (rc)
3099 return rc;
3100
3101 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3102 if (rc == -EBUSY)
3103 pcim_pin_device(pdev);
3104 if (rc)
3105 return rc;
3106 host->iomap = pcim_iomap_table(pdev);
3107 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3108
3109 rc = pci_go_64(pdev);
3110 if (rc)
3111 return rc;
3112
3113 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3114 if (rc)
3115 return rc;
3116
3117 /* initialize adapter */
3118 rc = mv_init_host(host, board_idx);
3119 if (rc)
3120 return rc;
3121
3122 /* Enable interrupts */
3123 if (msi && pci_enable_msi(pdev))
3124 pci_intx(pdev, 1);
3125
3126 mv_dump_pci_cfg(pdev, 0x68);
3127 mv_print_info(host);
3128
3129 pci_set_master(pdev);
3130 pci_try_set_mwi(pdev);
3131 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3132 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3133 }
3134 #endif
3135
3136 static int mv_platform_probe(struct platform_device *pdev);
3137 static int __devexit mv_platform_remove(struct platform_device *pdev);
3138
3139 static int __init mv_init(void)
3140 {
3141 int rc = -ENODEV;
3142 #ifdef CONFIG_PCI
3143 rc = pci_register_driver(&mv_pci_driver);
3144 if (rc < 0)
3145 return rc;
3146 #endif
3147 rc = platform_driver_register(&mv_platform_driver);
3148
3149 #ifdef CONFIG_PCI
3150 if (rc < 0)
3151 pci_unregister_driver(&mv_pci_driver);
3152 #endif
3153 return rc;
3154 }
3155
3156 static void __exit mv_exit(void)
3157 {
3158 #ifdef CONFIG_PCI
3159 pci_unregister_driver(&mv_pci_driver);
3160 #endif
3161 platform_driver_unregister(&mv_platform_driver);
3162 }
3163
3164 MODULE_AUTHOR("Brett Russ");
3165 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3166 MODULE_LICENSE("GPL");
3167 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3168 MODULE_VERSION(DRV_VERSION);
3169 MODULE_ALIAS("platform:sata_mv");
3170
3171 #ifdef CONFIG_PCI
3172 module_param(msi, int, 0444);
3173 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
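/* e.g. "modprobe sata_mv msi=1" asks the driver to try MSI instead of INTx */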
3174 #endif
3175
3176 module_init(mv_init);
3177 module_exit(mv_exit);