1 /*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember that a couple of workarounds (one related to PCI-X)
30 are still needed.
31
32 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
38 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is not
47 worth the latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58 */
59
60
61 #include <linux/kernel.h>
62 #include <linux/module.h>
63 #include <linux/pci.h>
64 #include <linux/init.h>
65 #include <linux/blkdev.h>
66 #include <linux/delay.h>
67 #include <linux/interrupt.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/device.h>
70 #include <scsi/scsi_host.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <scsi/scsi_device.h>
73 #include <linux/libata.h>
74
75 #define DRV_NAME "sata_mv"
76 #define DRV_VERSION "1.01"
77
78 enum {
79 /* BAR's are enumerated in terms of pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
89 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
95 MV_SATAHC0_REG_BASE = 0x20000,
96 MV_FLASH_CTL = 0x1046c,
97 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
99
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 */
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
118
119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
123 MV_PORT_MASK = 3,
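/* e.g. global port 5: HC = 5 >> MV_PORT_HC_SHIFT = 1, hard port = 5 & MV_PORT_MASK = 1 */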
124
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
132
133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
140
141 CRPB_FLAG_STATUS_SHIFT = 8,
142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
144
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
146
147 /* PCI interface registers */
148
149 PCI_COMMAND_OFS = 0xc00,
150
151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
155
156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
166
167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170
171 PCIE_IRQ_CAUSE_OFS = 0x1900,
172 PCIE_IRQ_MASK_OFS = 0x1910,
173 PCIE_UNMASK_ALL_IRQS = 0x70a, /* assorted bits */
174
175 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
176 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
177 PORT0_ERR = (1 << 0), /* shift by port # */
178 PORT0_DONE = (1 << 1), /* shift by port # */
179 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
180 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
181 PCI_ERR = (1 << 18),
182 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
183 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
184 PORTS_0_3_COAL_DONE = (1 << 8),
185 PORTS_4_7_COAL_DONE = (1 << 17),
186 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
187 GPIO_INT = (1 << 22),
188 SELF_INT = (1 << 23),
189 TWSI_INT = (1 << 24),
190 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
191 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
192 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
193 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
194 HC_MAIN_RSVD),
195 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
196 HC_MAIN_RSVD_5),
197
198 /* SATAHC registers */
199 HC_CFG_OFS = 0,
200
201 HC_IRQ_CAUSE_OFS = 0x14,
202 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
203 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
204 DEV_IRQ = (1 << 8), /* shift by port # */
205
206 /* Shadow block registers */
207 SHD_BLK_OFS = 0x100,
208 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
209
210 /* SATA registers */
211 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
212 SATA_ACTIVE_OFS = 0x350,
213 PHY_MODE3 = 0x310,
214 PHY_MODE4 = 0x314,
215 PHY_MODE2 = 0x330,
216 MV5_PHY_MODE = 0x74,
217 MV5_LT_MODE = 0x30,
218 MV5_PHY_CTL = 0x0C,
219 SATA_INTERFACE_CTL = 0x050,
220
221 MV_M2_PREAMP_MASK = 0x7e0,
222
223 /* Port registers */
224 EDMA_CFG_OFS = 0,
225 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
226 EDMA_CFG_NCQ = (1 << 5),
227 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
228 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
229 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
230
231 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
232 EDMA_ERR_IRQ_MASK_OFS = 0xc,
233 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
234 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
235 EDMA_ERR_DEV = (1 << 2), /* device error */
236 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
237 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
238 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
239 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
240 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
241 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
242 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
243 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
244 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
245 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
246 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
247 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
248 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
249 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
250 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
251 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
252 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
253 EDMA_ERR_OVERRUN_5 = (1 << 5),
254 EDMA_ERR_UNDERRUN_5 = (1 << 6),
255 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
256 EDMA_ERR_PRD_PAR |
257 EDMA_ERR_DEV_DCON |
258 EDMA_ERR_DEV_CON |
259 EDMA_ERR_SERR |
260 EDMA_ERR_SELF_DIS |
261 EDMA_ERR_CRQB_PAR |
262 EDMA_ERR_CRPB_PAR |
263 EDMA_ERR_INTRL_PAR |
264 EDMA_ERR_IORDY |
265 EDMA_ERR_LNK_CTRL_RX_2 |
266 EDMA_ERR_LNK_DATA_RX |
267 EDMA_ERR_LNK_DATA_TX |
268 EDMA_ERR_TRANS_PROTO,
269 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
270 EDMA_ERR_PRD_PAR |
271 EDMA_ERR_DEV_DCON |
272 EDMA_ERR_DEV_CON |
273 EDMA_ERR_OVERRUN_5 |
274 EDMA_ERR_UNDERRUN_5 |
275 EDMA_ERR_SELF_DIS_5 |
276 EDMA_ERR_CRQB_PAR |
277 EDMA_ERR_CRPB_PAR |
278 EDMA_ERR_INTRL_PAR |
279 EDMA_ERR_IORDY,
280
281 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
282 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
283
284 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
285 EDMA_REQ_Q_PTR_SHIFT = 5,
286
287 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
288 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
289 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
290 EDMA_RSP_Q_PTR_SHIFT = 3,
291
292 EDMA_CMD_OFS = 0x28, /* EDMA command register */
293 EDMA_EN = (1 << 0), /* enable EDMA */
294 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
295 ATA_RST = (1 << 2), /* reset trans/link/phy */
296
297 EDMA_IORDY_TMOUT = 0x34,
298 EDMA_ARB_CFG = 0x38,
299
300 /* Host private flags (hp_flags) */
301 MV_HP_FLAG_MSI = (1 << 0),
302 MV_HP_ERRATA_50XXB0 = (1 << 1),
303 MV_HP_ERRATA_50XXB2 = (1 << 2),
304 MV_HP_ERRATA_60X1B2 = (1 << 3),
305 MV_HP_ERRATA_60X1C0 = (1 << 4),
306 MV_HP_ERRATA_XX42A0 = (1 << 5),
307 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
308 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
309 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
310 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
311
312 /* Port private flags (pp_flags) */
313 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
314 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
315 };
316
317 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
318 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
319 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
320
321 enum {
322 /* DMA boundary 0xffff is required by the s/g splitting
323 * we need on /length/ in mv_fill-sg().
324 */
325 MV_DMA_BOUNDARY = 0xffffU,
326
327 /* mask of register bits containing lower 32 bits
328 * of EDMA request queue DMA address
329 */
330 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
331
332 /* ditto, for response queue */
333 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
334 };
335
336 enum chip_type {
337 chip_504x,
338 chip_508x,
339 chip_5080,
340 chip_604x,
341 chip_608x,
342 chip_6042,
343 chip_7042,
344 };
345
346 /* Command ReQuest Block: 32B */
347 struct mv_crqb {
348 __le32 sg_addr;
349 __le32 sg_addr_hi;
350 __le16 ctrl_flags;
351 __le16 ata_cmd[11];
352 };
353
354 struct mv_crqb_iie {
355 __le32 addr;
356 __le32 addr_hi;
357 __le32 flags;
358 __le32 len;
359 __le32 ata_cmd[4];
360 };
361
362 /* Command ResPonse Block: 8B */
363 struct mv_crpb {
364 __le16 id;
365 __le16 flags;
366 __le32 tmstmp;
367 };
368
369 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
370 struct mv_sg {
371 __le32 addr;
372 __le32 flags_size;
373 __le32 addr_hi;
374 __le32 reserved;
375 };
376
377 struct mv_port_priv {
378 struct mv_crqb *crqb;
379 dma_addr_t crqb_dma;
380 struct mv_crpb *crpb;
381 dma_addr_t crpb_dma;
382 struct mv_sg *sg_tbl;
383 dma_addr_t sg_tbl_dma;
384
385 unsigned int req_idx;
386 unsigned int resp_idx;
387
388 u32 pp_flags;
389 };
390
391 struct mv_port_signal {
392 u32 amps;
393 u32 pre;
394 };
395
396 struct mv_host_priv {
397 u32 hp_flags;
398 struct mv_port_signal signal[8];
399 const struct mv_hw_ops *ops;
400 u32 irq_cause_ofs;
401 u32 irq_mask_ofs;
402 u32 unmask_all_irqs;
403 };
404
405 struct mv_hw_ops {
406 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
407 unsigned int port);
408 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
409 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
410 void __iomem *mmio);
411 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
412 unsigned int n_hc);
413 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
414 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
415 };
416
417 static void mv_irq_clear(struct ata_port *ap);
418 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
419 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
420 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
421 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
422 static int mv_port_start(struct ata_port *ap);
423 static void mv_port_stop(struct ata_port *ap);
424 static void mv_qc_prep(struct ata_queued_cmd *qc);
425 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
426 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
427 static void mv_error_handler(struct ata_port *ap);
428 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
429 static void mv_eh_freeze(struct ata_port *ap);
430 static void mv_eh_thaw(struct ata_port *ap);
431 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
432
433 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
434 unsigned int port);
435 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
436 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
437 void __iomem *mmio);
438 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
439 unsigned int n_hc);
440 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
441 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
442
443 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
444 unsigned int port);
445 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
446 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
447 void __iomem *mmio);
448 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
449 unsigned int n_hc);
450 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
451 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
452 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
453 unsigned int port_no);
454
455 static struct scsi_host_template mv5_sht = {
456 .module = THIS_MODULE,
457 .name = DRV_NAME,
458 .ioctl = ata_scsi_ioctl,
459 .queuecommand = ata_scsi_queuecmd,
460 .can_queue = ATA_DEF_QUEUE,
461 .this_id = ATA_SHT_THIS_ID,
462 .sg_tablesize = MV_MAX_SG_CT / 2,
463 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
464 .emulated = ATA_SHT_EMULATED,
465 .use_clustering = 1,
466 .proc_name = DRV_NAME,
467 .dma_boundary = MV_DMA_BOUNDARY,
468 .slave_configure = ata_scsi_slave_config,
469 .slave_destroy = ata_scsi_slave_destroy,
470 .bios_param = ata_std_bios_param,
471 };
472
473 static struct scsi_host_template mv6_sht = {
474 .module = THIS_MODULE,
475 .name = DRV_NAME,
476 .ioctl = ata_scsi_ioctl,
477 .queuecommand = ata_scsi_queuecmd,
478 .can_queue = ATA_DEF_QUEUE,
479 .this_id = ATA_SHT_THIS_ID,
480 .sg_tablesize = MV_MAX_SG_CT / 2,
481 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
482 .emulated = ATA_SHT_EMULATED,
483 .use_clustering = 1,
484 .proc_name = DRV_NAME,
485 .dma_boundary = MV_DMA_BOUNDARY,
486 .slave_configure = ata_scsi_slave_config,
487 .slave_destroy = ata_scsi_slave_destroy,
488 .bios_param = ata_std_bios_param,
489 };
490
491 static const struct ata_port_operations mv5_ops = {
492 .tf_load = ata_tf_load,
493 .tf_read = ata_tf_read,
494 .check_status = ata_check_status,
495 .exec_command = ata_exec_command,
496 .dev_select = ata_std_dev_select,
497
498 .cable_detect = ata_cable_sata,
499
500 .qc_prep = mv_qc_prep,
501 .qc_issue = mv_qc_issue,
502 .data_xfer = ata_data_xfer,
503
504 .irq_clear = mv_irq_clear,
505 .irq_on = ata_irq_on,
506
507 .error_handler = mv_error_handler,
508 .post_internal_cmd = mv_post_int_cmd,
509 .freeze = mv_eh_freeze,
510 .thaw = mv_eh_thaw,
511
512 .scr_read = mv5_scr_read,
513 .scr_write = mv5_scr_write,
514
515 .port_start = mv_port_start,
516 .port_stop = mv_port_stop,
517 };
518
519 static const struct ata_port_operations mv6_ops = {
520 .tf_load = ata_tf_load,
521 .tf_read = ata_tf_read,
522 .check_status = ata_check_status,
523 .exec_command = ata_exec_command,
524 .dev_select = ata_std_dev_select,
525
526 .cable_detect = ata_cable_sata,
527
528 .qc_prep = mv_qc_prep,
529 .qc_issue = mv_qc_issue,
530 .data_xfer = ata_data_xfer,
531
532 .irq_clear = mv_irq_clear,
533 .irq_on = ata_irq_on,
534
535 .error_handler = mv_error_handler,
536 .post_internal_cmd = mv_post_int_cmd,
537 .freeze = mv_eh_freeze,
538 .thaw = mv_eh_thaw,
539
540 .scr_read = mv_scr_read,
541 .scr_write = mv_scr_write,
542
543 .port_start = mv_port_start,
544 .port_stop = mv_port_stop,
545 };
546
547 static const struct ata_port_operations mv_iie_ops = {
548 .tf_load = ata_tf_load,
549 .tf_read = ata_tf_read,
550 .check_status = ata_check_status,
551 .exec_command = ata_exec_command,
552 .dev_select = ata_std_dev_select,
553
554 .cable_detect = ata_cable_sata,
555
556 .qc_prep = mv_qc_prep_iie,
557 .qc_issue = mv_qc_issue,
558 .data_xfer = ata_data_xfer,
559
560 .irq_clear = mv_irq_clear,
561 .irq_on = ata_irq_on,
562
563 .error_handler = mv_error_handler,
564 .post_internal_cmd = mv_post_int_cmd,
565 .freeze = mv_eh_freeze,
566 .thaw = mv_eh_thaw,
567
568 .scr_read = mv_scr_read,
569 .scr_write = mv_scr_write,
570
571 .port_start = mv_port_start,
572 .port_stop = mv_port_stop,
573 };
574
575 static const struct ata_port_info mv_port_info[] = {
576 { /* chip_504x */
577 .flags = MV_COMMON_FLAGS,
578 .pio_mask = 0x1f, /* pio0-4 */
579 .udma_mask = ATA_UDMA6,
580 .port_ops = &mv5_ops,
581 },
582 { /* chip_508x */
583 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
584 .pio_mask = 0x1f, /* pio0-4 */
585 .udma_mask = ATA_UDMA6,
586 .port_ops = &mv5_ops,
587 },
588 { /* chip_5080 */
589 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
590 .pio_mask = 0x1f, /* pio0-4 */
591 .udma_mask = ATA_UDMA6,
592 .port_ops = &mv5_ops,
593 },
594 { /* chip_604x */
595 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
596 .pio_mask = 0x1f, /* pio0-4 */
597 .udma_mask = ATA_UDMA6,
598 .port_ops = &mv6_ops,
599 },
600 { /* chip_608x */
601 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
602 MV_FLAG_DUAL_HC,
603 .pio_mask = 0x1f, /* pio0-4 */
604 .udma_mask = ATA_UDMA6,
605 .port_ops = &mv6_ops,
606 },
607 { /* chip_6042 */
608 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
609 .pio_mask = 0x1f, /* pio0-4 */
610 .udma_mask = ATA_UDMA6,
611 .port_ops = &mv_iie_ops,
612 },
613 { /* chip_7042 */
614 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
615 .pio_mask = 0x1f, /* pio0-4 */
616 .udma_mask = ATA_UDMA6,
617 .port_ops = &mv_iie_ops,
618 },
619 };
620
621 static const struct pci_device_id mv_pci_tbl[] = {
622 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
623 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
624 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
625 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
626 /* RocketRAID 1740/174x have different identifiers */
627 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
628 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
629
630 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
631 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
632 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
633 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
634 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
635
636 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
637
638 /* Adaptec 1430SA */
639 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
640
641 /* Marvell 7042 support */
642 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
643
644 /* Highpoint RocketRAID PCIe series */
645 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
646 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
647
648 { } /* terminate list */
649 };
650
651 static struct pci_driver mv_pci_driver = {
652 .name = DRV_NAME,
653 .id_table = mv_pci_tbl,
654 .probe = mv_init_one,
655 .remove = ata_pci_remove_one,
656 };
657
658 static const struct mv_hw_ops mv5xxx_ops = {
659 .phy_errata = mv5_phy_errata,
660 .enable_leds = mv5_enable_leds,
661 .read_preamp = mv5_read_preamp,
662 .reset_hc = mv5_reset_hc,
663 .reset_flash = mv5_reset_flash,
664 .reset_bus = mv5_reset_bus,
665 };
666
667 static const struct mv_hw_ops mv6xxx_ops = {
668 .phy_errata = mv6_phy_errata,
669 .enable_leds = mv6_enable_leds,
670 .read_preamp = mv6_read_preamp,
671 .reset_hc = mv6_reset_hc,
672 .reset_flash = mv6_reset_flash,
673 .reset_bus = mv_reset_pci_bus,
674 };
675
676 /*
677 * module options
678 */
679 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
680
681
682 /* move to PCI layer or libata core? */
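/* Try a 64-bit DMA mask first; if the consistent (coherent) mask cannot
 * also be 64-bit, drop it to 32-bit. Otherwise fall back to 32-bit masks
 * for both streaming and consistent DMA.
 */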
683 static int pci_go_64(struct pci_dev *pdev)
684 {
685 int rc;
686
687 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
688 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
689 if (rc) {
690 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
691 if (rc) {
692 dev_printk(KERN_ERR, &pdev->dev,
693 "64-bit DMA enable failed\n");
694 return rc;
695 }
696 }
697 } else {
698 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
699 if (rc) {
700 dev_printk(KERN_ERR, &pdev->dev,
701 "32-bit DMA enable failed\n");
702 return rc;
703 }
704 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
705 if (rc) {
706 dev_printk(KERN_ERR, &pdev->dev,
707 "32-bit consistent DMA enable failed\n");
708 return rc;
709 }
710 }
711
712 return rc;
713 }
714
715 /*
716 * Functions
717 */
718
719 static inline void writelfl(unsigned long data, void __iomem *addr)
720 {
721 writel(data, addr);
722 (void) readl(addr); /* flush to avoid PCI posted write */
723 }
724
725 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
726 {
727 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
728 }
729
730 static inline unsigned int mv_hc_from_port(unsigned int port)
731 {
732 return port >> MV_PORT_HC_SHIFT;
733 }
734
735 static inline unsigned int mv_hardport_from_port(unsigned int port)
736 {
737 return port & MV_PORT_MASK;
738 }
739
740 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
741 unsigned int port)
742 {
743 return mv_hc_base(base, mv_hc_from_port(port));
744 }
745
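/* Each port's registers sit inside its SATAHC window, just past the 8KB
 * arbiter region. For example, global port 6 (HC 1, hard port 2) maps to
 * base + 0x20000 + 0x10000 + 0x2000 + 2 * 0x2000 = base + 0x36000.
 */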
746 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
747 {
748 return mv_hc_base_from_port(base, port) +
749 MV_SATAHC_ARBTR_REG_SZ +
750 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
751 }
752
753 static inline void __iomem *mv_ap_base(struct ata_port *ap)
754 {
755 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
756 }
757
758 static inline int mv_get_hc_count(unsigned long port_flags)
759 {
760 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
761 }
762
763 static void mv_irq_clear(struct ata_port *ap)
764 {
765 }
766
767 static void mv_set_edma_ptrs(void __iomem *port_mmio,
768 struct mv_host_priv *hpriv,
769 struct mv_port_priv *pp)
770 {
771 u32 index;
772
773 /*
774 * initialize request queue
775 */
776 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
777
778 WARN_ON(pp->crqb_dma & 0x3ff);
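/* the (x >> 16) >> 16 below yields the upper 32 bits of the DMA address
 * while remaining well-defined when dma_addr_t is only 32 bits wide
 * (a straight >> 32 would be undefined in that case)
 */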
779 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
780 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
781 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
782
783 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
784 writelfl((pp->crqb_dma & 0xffffffff) | index,
785 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
786 else
787 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
788
789 /*
790 * initialize response queue
791 */
792 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
793
794 WARN_ON(pp->crpb_dma & 0xff);
795 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
796
797 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
798 writelfl((pp->crpb_dma & 0xffffffff) | index,
799 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
800 else
801 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
802
803 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
804 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
805 }
806
807 /**
808 * mv_start_dma - Enable eDMA engine
809 * @base: port base address
 * @hpriv: host private data
810 * @pp: port private data
811 *
812 * Verify the local cache of the eDMA state is accurate with a
813 * WARN_ON.
814 *
815 * LOCKING:
816 * Inherited from caller.
817 */
818 static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
819 struct mv_port_priv *pp)
820 {
821 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
822 /* clear EDMA event indicators, if any */
823 writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
824
825 mv_set_edma_ptrs(base, hpriv, pp);
826
827 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
828 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
829 }
830 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
831 }
832
833 /**
834 * __mv_stop_dma - Disable eDMA engine
835 * @ap: ATA channel to manipulate
836 *
837 * Verify the local cache of the eDMA state is accurate with a
838 * WARN_ON.
839 *
840 * LOCKING:
841 * Inherited from caller.
842 */
843 static int __mv_stop_dma(struct ata_port *ap)
844 {
845 void __iomem *port_mmio = mv_ap_base(ap);
846 struct mv_port_priv *pp = ap->private_data;
847 u32 reg;
848 int i, err = 0;
849
850 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
851 /* Disable EDMA if active. The disable bit auto clears.
852 */
853 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
854 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
855 } else {
856 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
857 }
858
859 /* now properly wait (up to ~100 ms) for the eDMA to stop */
860 for (i = 1000; i > 0; i--) {
861 reg = readl(port_mmio + EDMA_CMD_OFS);
862 if (!(reg & EDMA_EN))
863 break;
864
865 udelay(100);
866 }
867
868 if (reg & EDMA_EN) {
869 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
870 err = -EIO;
871 }
872
873 return err;
874 }
875
876 static int mv_stop_dma(struct ata_port *ap)
877 {
878 unsigned long flags;
879 int rc;
880
881 spin_lock_irqsave(&ap->host->lock, flags);
882 rc = __mv_stop_dma(ap);
883 spin_unlock_irqrestore(&ap->host->lock, flags);
884
885 return rc;
886 }
887
888 #ifdef ATA_DEBUG
889 static void mv_dump_mem(void __iomem *start, unsigned bytes)
890 {
891 int b, w;
892 for (b = 0; b < bytes; ) {
893 DPRINTK("%p: ", start + b);
894 for (w = 0; b < bytes && w < 4; w++) {
895 printk("%08x ", readl(start + b));
896 b += sizeof(u32);
897 }
898 printk("\n");
899 }
900 }
901 #endif
902
903 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
904 {
905 #ifdef ATA_DEBUG
906 int b, w;
907 u32 dw;
908 for (b = 0; b < bytes; ) {
909 DPRINTK("%02x: ", b);
910 for (w = 0; b < bytes && w < 4; w++) {
911 (void) pci_read_config_dword(pdev, b, &dw);
912 printk("%08x ", dw);
913 b += sizeof(u32);
914 }
915 printk("\n");
916 }
917 #endif
918 }
919 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
920 struct pci_dev *pdev)
921 {
922 #ifdef ATA_DEBUG
923 void __iomem *hc_base = mv_hc_base(mmio_base,
924 port >> MV_PORT_HC_SHIFT);
925 void __iomem *port_base;
926 int start_port, num_ports, p, start_hc, num_hcs, hc;
927
928 if (0 > port) {
929 start_hc = start_port = 0;
930 num_ports = 8; /* should be benign for 4-port devices */
931 num_hcs = 2;
932 } else {
933 start_hc = port >> MV_PORT_HC_SHIFT;
934 start_port = port;
935 num_ports = num_hcs = 1;
936 }
937 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
938 num_ports > 1 ? num_ports - 1 : start_port);
939
940 if (NULL != pdev) {
941 DPRINTK("PCI config space regs:\n");
942 mv_dump_pci_cfg(pdev, 0x68);
943 }
944 DPRINTK("PCI regs:\n");
945 mv_dump_mem(mmio_base+0xc00, 0x3c);
946 mv_dump_mem(mmio_base+0xd00, 0x34);
947 mv_dump_mem(mmio_base+0xf00, 0x4);
948 mv_dump_mem(mmio_base+0x1d00, 0x6c);
949 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
950 hc_base = mv_hc_base(mmio_base, hc);
951 DPRINTK("HC regs (HC %i):\n", hc);
952 mv_dump_mem(hc_base, 0x1c);
953 }
954 for (p = start_port; p < start_port + num_ports; p++) {
955 port_base = mv_port_base(mmio_base, p);
956 DPRINTK("EDMA regs (port %i):\n", p);
957 mv_dump_mem(port_base, 0x54);
958 DPRINTK("SATA regs (port %i):\n", p);
959 mv_dump_mem(port_base+0x300, 0x60);
960 }
961 #endif
962 }
963
964 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
965 {
966 unsigned int ofs;
967
968 switch (sc_reg_in) {
969 case SCR_STATUS:
970 case SCR_CONTROL:
971 case SCR_ERROR:
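/* SCR_STATUS/SCR_ERROR/SCR_CONTROL are SCR indices 0/1/2, so this
 * yields offsets 0x300/0x304/0x308 within the port's SATA block */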
972 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
973 break;
974 case SCR_ACTIVE:
975 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
976 break;
977 default:
978 ofs = 0xffffffffU;
979 break;
980 }
981 return ofs;
982 }
983
984 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
985 {
986 unsigned int ofs = mv_scr_offset(sc_reg_in);
987
988 if (ofs != 0xffffffffU) {
989 *val = readl(mv_ap_base(ap) + ofs);
990 return 0;
991 } else
992 return -EINVAL;
993 }
994
995 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
996 {
997 unsigned int ofs = mv_scr_offset(sc_reg_in);
998
999 if (ofs != 0xffffffffU) {
1000 writelfl(val, mv_ap_base(ap) + ofs);
1001 return 0;
1002 } else
1003 return -EINVAL;
1004 }
1005
1006 static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
1007 void __iomem *port_mmio)
1008 {
1009 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
1010
1011 /* set up non-NCQ EDMA configuration */
1012 cfg &= ~(1 << 9); /* disable eQue */
1013
1014 if (IS_GEN_I(hpriv)) {
1015 cfg &= ~0x1f; /* clear queue depth */
1016 cfg |= (1 << 8); /* enab config burst size mask */
1017 }
1018
1019 else if (IS_GEN_II(hpriv)) {
1020 cfg &= ~0x1f; /* clear queue depth */
1021 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1022 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
1023 }
1024
1025 else if (IS_GEN_IIE(hpriv)) {
1026 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1027 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1028 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
1029 cfg |= (1 << 18); /* enab early completion */
1030 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1031 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
1032 cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */
1033 }
1034
1035 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1036 }
1037
1038 /**
1039 * mv_port_start - Port specific init/start routine.
1040 * @ap: ATA channel to manipulate
1041 *
1042 * Allocate and point to DMA memory, init port private memory,
1043 * zero indices.
1044 *
1045 * LOCKING:
1046 * Inherited from caller.
1047 */
1048 static int mv_port_start(struct ata_port *ap)
1049 {
1050 struct device *dev = ap->host->dev;
1051 struct mv_host_priv *hpriv = ap->host->private_data;
1052 struct mv_port_priv *pp;
1053 void __iomem *port_mmio = mv_ap_base(ap);
1054 void *mem;
1055 dma_addr_t mem_dma;
1056 unsigned long flags;
1057 int rc;
1058
1059 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1060 if (!pp)
1061 return -ENOMEM;
1062
1063 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1064 GFP_KERNEL);
1065 if (!mem)
1066 return -ENOMEM;
1067 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1068
1069 rc = ata_pad_alloc(ap, dev);
1070 if (rc)
1071 return rc;
1072
1073 /* First item in chunk of DMA memory:
1074 * 32-slot command request table (CRQB), 32 bytes each in size
1075 */
1076 pp->crqb = mem;
1077 pp->crqb_dma = mem_dma;
1078 mem += MV_CRQB_Q_SZ;
1079 mem_dma += MV_CRQB_Q_SZ;
1080
1081 /* Second item:
1082 * 32-slot command response table (CRPB), 8 bytes each in size
1083 */
1084 pp->crpb = mem;
1085 pp->crpb_dma = mem_dma;
1086 mem += MV_CRPB_Q_SZ;
1087 mem_dma += MV_CRPB_Q_SZ;
1088
1089 /* Third item:
1090 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1091 */
1092 pp->sg_tbl = mem;
1093 pp->sg_tbl_dma = mem_dma;
1094
1095 spin_lock_irqsave(&ap->host->lock, flags);
1096
1097 mv_edma_cfg(ap, hpriv, port_mmio);
1098
1099 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1100
1101 spin_unlock_irqrestore(&ap->host->lock, flags);
1102
1103 /* Don't turn on EDMA here...do it before DMA commands only. Else
1104 * we'll be unable to send non-data, PIO, etc. due to restricted access
1105 * to shadow regs.
1106 */
1107 ap->private_data = pp;
1108 return 0;
1109 }
1110
1111 /**
1112 * mv_port_stop - Port specific cleanup/stop routine.
1113 * @ap: ATA channel to manipulate
1114 *
1115 * Stop DMA, cleanup port memory.
1116 *
1117 * LOCKING:
1118 * This routine uses the host lock to protect the DMA stop.
1119 */
1120 static void mv_port_stop(struct ata_port *ap)
1121 {
1122 mv_stop_dma(ap);
1123 }
1124
1125 /**
1126 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1127 * @qc: queued command whose SG list to source from
1128 *
1129 * Populate the SG list and mark the last entry.
1130 *
1131 * LOCKING:
1132 * Inherited from caller.
1133 */
1134 static void mv_fill_sg(struct ata_queued_cmd *qc)
1135 {
1136 struct mv_port_priv *pp = qc->ap->private_data;
1137 struct scatterlist *sg;
1138 struct mv_sg *mv_sg, *last_sg = NULL;
1139 unsigned int si;
1140
1141 mv_sg = pp->sg_tbl;
1142 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1143 dma_addr_t addr = sg_dma_address(sg);
1144 u32 sg_len = sg_dma_len(sg);
1145
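/* carve the segment into chunks that each fit the 16-bit ePRD
 * length field and never cross a 64KB boundary
 */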
1146 while (sg_len) {
1147 u32 offset = addr & 0xffff;
1148 u32 len = sg_len;
1149
1150 if (offset + sg_len > 0x10000)
1151 len = 0x10000 - offset;
1152
1153 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1154 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1155 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1156
1157 sg_len -= len;
1158 addr += len;
1159
1160 last_sg = mv_sg;
1161 mv_sg++;
1162 }
1163 }
1164
1165 if (likely(last_sg))
1166 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1167 }
1168
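/* Pack one shadow-register write into a 16-bit CRQB command word: bits 7:0
 * hold the data byte, the register address goes in at CRQB_CMD_ADDR_SHIFT,
 * CRQB_CMD_CS selects the command block, and CRQB_CMD_LAST marks the final
 * word of the request.
 */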
1169 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1170 {
1171 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1172 (last ? CRQB_CMD_LAST : 0);
1173 *cmdw = cpu_to_le16(tmp);
1174 }
1175
1176 /**
1177 * mv_qc_prep - Host specific command preparation.
1178 * @qc: queued command to prepare
1179 *
1180 * This routine simply redirects to the general purpose routine
1181 * if command is not DMA. Else, it handles prep of the CRQB
1182 * (command request block), does some sanity checking, and calls
1183 * the SG load routine.
1184 *
1185 * LOCKING:
1186 * Inherited from caller.
1187 */
1188 static void mv_qc_prep(struct ata_queued_cmd *qc)
1189 {
1190 struct ata_port *ap = qc->ap;
1191 struct mv_port_priv *pp = ap->private_data;
1192 __le16 *cw;
1193 struct ata_taskfile *tf;
1194 u16 flags = 0;
1195 unsigned in_index;
1196
1197 if (qc->tf.protocol != ATA_PROT_DMA)
1198 return;
1199
1200 /* Fill in command request block
1201 */
1202 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1203 flags |= CRQB_FLAG_READ;
1204 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1205 flags |= qc->tag << CRQB_TAG_SHIFT;
1206 flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/
1207
1208 /* get current queue index from software */
1209 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1210
1211 pp->crqb[in_index].sg_addr =
1212 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1213 pp->crqb[in_index].sg_addr_hi =
1214 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1215 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1216
1217 cw = &pp->crqb[in_index].ata_cmd[0];
1218 tf = &qc->tf;
1219
1220 /* Sadly, the CRQB cannot accommodate all registers--there are
1221 * only 11 command words...so we must pick and choose required
1222 * registers based on the command. So, we drop feature and
1223 * hob_feature for [RW] DMA commands, but they are needed for
1224 * NCQ. NCQ will drop hob_nsect.
1225 */
1226 switch (tf->command) {
1227 case ATA_CMD_READ:
1228 case ATA_CMD_READ_EXT:
1229 case ATA_CMD_WRITE:
1230 case ATA_CMD_WRITE_EXT:
1231 case ATA_CMD_WRITE_FUA_EXT:
1232 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1233 break;
1234 #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1235 case ATA_CMD_FPDMA_READ:
1236 case ATA_CMD_FPDMA_WRITE:
1237 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1238 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1239 break;
1240 #endif /* FIXME: remove this line when NCQ added */
1241 default:
1242 /* The only other commands EDMA supports in non-queued and
1243 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1244 * of which are defined/used by Linux. If we get here, this
1245 * driver needs work.
1246 *
1247 * FIXME: modify libata to give qc_prep a return value and
1248 * return error here.
1249 */
1250 BUG_ON(tf->command);
1251 break;
1252 }
1253 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1254 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1255 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1256 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1257 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1258 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1259 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1260 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1261 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1262
1263 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1264 return;
1265 mv_fill_sg(qc);
1266 }
1267
1268 /**
1269 * mv_qc_prep_iie - Host specific command preparation.
1270 * @qc: queued command to prepare
1271 *
1272 * This routine simply redirects to the general purpose routine
1273 * if command is not DMA. Else, it handles prep of the CRQB
1274 * (command request block), does some sanity checking, and calls
1275 * the SG load routine.
1276 *
1277 * LOCKING:
1278 * Inherited from caller.
1279 */
1280 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1281 {
1282 struct ata_port *ap = qc->ap;
1283 struct mv_port_priv *pp = ap->private_data;
1284 struct mv_crqb_iie *crqb;
1285 struct ata_taskfile *tf;
1286 unsigned in_index;
1287 u32 flags = 0;
1288
1289 if (qc->tf.protocol != ATA_PROT_DMA)
1290 return;
1291
1292 /* Fill in Gen IIE command request block
1293 */
1294 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1295 flags |= CRQB_FLAG_READ;
1296
1297 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1298 flags |= qc->tag << CRQB_TAG_SHIFT;
1299 flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really-
1300 what we use as our tag */
1301
1302 /* get current queue index from software */
1303 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1304
1305 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1306 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1307 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1308 crqb->flags = cpu_to_le32(flags);
1309
1310 tf = &qc->tf;
1311 crqb->ata_cmd[0] = cpu_to_le32(
1312 (tf->command << 16) |
1313 (tf->feature << 24)
1314 );
1315 crqb->ata_cmd[1] = cpu_to_le32(
1316 (tf->lbal << 0) |
1317 (tf->lbam << 8) |
1318 (tf->lbah << 16) |
1319 (tf->device << 24)
1320 );
1321 crqb->ata_cmd[2] = cpu_to_le32(
1322 (tf->hob_lbal << 0) |
1323 (tf->hob_lbam << 8) |
1324 (tf->hob_lbah << 16) |
1325 (tf->hob_feature << 24)
1326 );
1327 crqb->ata_cmd[3] = cpu_to_le32(
1328 (tf->nsect << 0) |
1329 (tf->hob_nsect << 8)
1330 );
1331
1332 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1333 return;
1334 mv_fill_sg(qc);
1335 }
1336
1337 /**
1338 * mv_qc_issue - Initiate a command to the host
1339 * @qc: queued command to start
1340 *
1341 * This routine simply redirects to the general purpose routine
1342 * if command is not DMA. Else, it sanity checks our local
1343 * caches of the request producer/consumer indices then enables
1344 * DMA and bumps the request producer index.
1345 *
1346 * LOCKING:
1347 * Inherited from caller.
1348 */
1349 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1350 {
1351 struct ata_port *ap = qc->ap;
1352 void __iomem *port_mmio = mv_ap_base(ap);
1353 struct mv_port_priv *pp = ap->private_data;
1354 struct mv_host_priv *hpriv = ap->host->private_data;
1355 u32 in_index;
1356
1357 if (qc->tf.protocol != ATA_PROT_DMA) {
1358 /* We're about to send a non-EDMA capable command to the
1359 * port. Turn off EDMA so there won't be problems accessing
1360 * shadow block, etc registers.
1361 */
1362 __mv_stop_dma(ap);
1363 return ata_qc_issue_prot(qc);
1364 }
1365
1366 mv_start_dma(port_mmio, hpriv, pp);
1367
1368 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1369
1370 /* until we do queuing, the queue should be empty at this point */
1371 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1372 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1373
1374 pp->req_idx++;
1375
1376 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1377
1378 /* and write the request in pointer to kick the EDMA to life */
1379 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1380 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1381
1382 return 0;
1383 }
1384
1385 /**
1386 * mv_err_intr - Handle error interrupts on the port
1387 * @ap: ATA channel to manipulate
1388 * @reset_allowed: bool: 0 == don't trigger from reset here
1389 *
1390 * In most cases, just clear the interrupt and move on. However,
1391 * some cases require an eDMA reset, which is done right before
1392 * the COMRESET in mv_phy_reset(). The SERR case requires a
1393 * clear of pending errors in the SATA SERROR register. Finally,
1394 * if the port disabled DMA, update our cached copy to match.
1395 *
1396 * LOCKING:
1397 * Inherited from caller.
1398 */
1399 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1400 {
1401 void __iomem *port_mmio = mv_ap_base(ap);
1402 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1403 struct mv_port_priv *pp = ap->private_data;
1404 struct mv_host_priv *hpriv = ap->host->private_data;
1405 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1406 unsigned int action = 0, err_mask = 0;
1407 struct ata_eh_info *ehi = &ap->link.eh_info;
1408
1409 ata_ehi_clear_desc(ehi);
1410
1411 if (!edma_enabled) {
1412 /* just a guess: do we need to do this? should we
1413 * expand this, and do it in all cases?
1414 */
1415 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1416 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1417 }
1418
1419 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1420
1421 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1422
1423 /*
1424 * all generations share these EDMA error cause bits
1425 */
1426
1427 if (edma_err_cause & EDMA_ERR_DEV)
1428 err_mask |= AC_ERR_DEV;
1429 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1430 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1431 EDMA_ERR_INTRL_PAR)) {
1432 err_mask |= AC_ERR_ATA_BUS;
1433 action |= ATA_EH_HARDRESET;
1434 ata_ehi_push_desc(ehi, "parity error");
1435 }
1436 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1437 ata_ehi_hotplugged(ehi);
1438 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1439 "dev disconnect" : "dev connect");
1440 }
1441
1442 if (IS_GEN_I(hpriv)) {
1443 eh_freeze_mask = EDMA_EH_FREEZE_5;
1444
1445 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1446 struct mv_port_priv *pp = ap->private_data;
1447 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1448 ata_ehi_push_desc(ehi, "EDMA self-disable");
1449 }
1450 } else {
1451 eh_freeze_mask = EDMA_EH_FREEZE;
1452
1453 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1454 struct mv_port_priv *pp = ap->private_data;
1455 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1456 ata_ehi_push_desc(ehi, "EDMA self-disable");
1457 }
1458
1459 if (edma_err_cause & EDMA_ERR_SERR) {
1460 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1461 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1462 err_mask = AC_ERR_ATA_BUS;
1463 action |= ATA_EH_HARDRESET;
1464 }
1465 }
1466
1467 /* Clear EDMA now that SERR cleanup done */
1468 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1469
1470 if (!err_mask) {
1471 err_mask = AC_ERR_OTHER;
1472 action |= ATA_EH_HARDRESET;
1473 }
1474
1475 ehi->serror |= serr;
1476 ehi->action |= action;
1477
1478 if (qc)
1479 qc->err_mask |= err_mask;
1480 else
1481 ehi->err_mask |= err_mask;
1482
1483 if (edma_err_cause & eh_freeze_mask)
1484 ata_port_freeze(ap);
1485 else
1486 ata_port_abort(ap);
1487 }
1488
1489 static void mv_intr_pio(struct ata_port *ap)
1490 {
1491 struct ata_queued_cmd *qc;
1492 u8 ata_status;
1493
1494 /* ignore spurious intr if drive still BUSY */
1495 ata_status = readb(ap->ioaddr.status_addr);
1496 if (unlikely(ata_status & ATA_BUSY))
1497 return;
1498
1499 /* get active ATA command */
1500 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1501 if (unlikely(!qc)) /* no active tag */
1502 return;
1503 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1504 return;
1505
1506 /* and finally, complete the ATA command */
1507 qc->err_mask |= ac_err_mask(ata_status);
1508 ata_qc_complete(qc);
1509 }
1510
1511 static void mv_intr_edma(struct ata_port *ap)
1512 {
1513 void __iomem *port_mmio = mv_ap_base(ap);
1514 struct mv_host_priv *hpriv = ap->host->private_data;
1515 struct mv_port_priv *pp = ap->private_data;
1516 struct ata_queued_cmd *qc;
1517 u32 out_index, in_index;
1518 bool work_done = false;
1519
1520 /* get h/w response queue pointer */
1521 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1522 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1523
1524 while (1) {
1525 u16 status;
1526 unsigned int tag;
1527
1528 /* get s/w response queue last-read pointer, and compare */
1529 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1530 if (in_index == out_index)
1531 break;
1532
1533 /* 50xx: get active ATA command */
1534 if (IS_GEN_I(hpriv))
1535 tag = ap->link.active_tag;
1536
1537 /* Gen II/IIE: get active ATA command via tag, to enable
1538 * support for queueing. this works transparently for
1539 * queued and non-queued modes.
1540 */
1541 else if (IS_GEN_II(hpriv))
1542 tag = (le16_to_cpu(pp->crpb[out_index].id)
1543 >> CRPB_IOID_SHIFT_6) & 0x3f;
1544
1545 else /* IS_GEN_IIE */
1546 tag = (le16_to_cpu(pp->crpb[out_index].id)
1547 >> CRPB_IOID_SHIFT_7) & 0x3f;
1548
1549 qc = ata_qc_from_tag(ap, tag);
1550
1551 /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1552 * bits (WARNING: might not necessarily be associated
1553 * with this command), which -should- be clear
1554 * if all is well
1555 */
1556 status = le16_to_cpu(pp->crpb[out_index].flags);
1557 if (unlikely(status & 0xff)) {
1558 mv_err_intr(ap, qc);
1559 return;
1560 }
1561
1562 /* and finally, complete the ATA command */
1563 if (qc) {
1564 qc->err_mask |=
1565 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1566 ata_qc_complete(qc);
1567 }
1568
1569 /* advance software response queue pointer, to
1570 * indicate (after the loop completes) to hardware
1571 * that we have consumed a response queue entry.
1572 */
1573 work_done = true;
1574 pp->resp_idx++;
1575 }
1576
1577 if (work_done)
1578 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1579 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1580 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1581 }
1582
1583 /**
1584 * mv_host_intr - Handle all interrupts on the given host controller
1585 * @host: host specific structure
1586 * @relevant: port error bits relevant to this host controller
1587 * @hc: which host controller we're to look at
1588 *
1589 * Read then write clear the HC interrupt status then walk each
1590 * port connected to the HC and see if it needs servicing. Port
1591 * success ints are reported in the HC interrupt status reg, the
1592 * port error ints are reported in the higher level main
1593 * interrupt status register and thus are passed in via the
1594 * 'relevant' argument.
1595 *
1596 * LOCKING:
1597 * Inherited from caller.
1598 */
1599 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1600 {
1601 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1602 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1603 u32 hc_irq_cause;
1604 int port, port0;
1605
1606 if (hc == 0)
1607 port0 = 0;
1608 else
1609 port0 = MV_PORTS_PER_HC;
1610
1611 /* we'll need the HC success int register in most cases */
1612 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1613 if (!hc_irq_cause)
1614 return;
1615
1616 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1617
1618 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1619 hc, relevant, hc_irq_cause);
1620
1621 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1622 struct ata_port *ap = host->ports[port];
1623 struct mv_port_priv *pp = ap->private_data;
1624 int have_err_bits, hard_port, shift;
1625
1626 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1627 continue;
1628
1629 shift = port << 1; /* (port * 2) */
1630 if (port >= MV_PORTS_PER_HC) {
1631 shift++; /* skip bit 8 in the HC Main IRQ reg */
1632 }
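/* e.g. port 5: shift = 11, so its error/done bits are bits 11/12 of the
 * main cause register (bit 8 is HC0's coalescing-done bit, hence the skip)
 */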
1633 have_err_bits = ((PORT0_ERR << shift) & relevant);
1634
1635 if (unlikely(have_err_bits)) {
1636 struct ata_queued_cmd *qc;
1637
1638 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1639 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1640 continue;
1641
1642 mv_err_intr(ap, qc);
1643 continue;
1644 }
1645
1646 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1647
1648 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1649 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1650 mv_intr_edma(ap);
1651 } else {
1652 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1653 mv_intr_pio(ap);
1654 }
1655 }
1656 VPRINTK("EXIT\n");
1657 }
1658
1659 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1660 {
1661 struct mv_host_priv *hpriv = host->private_data;
1662 struct ata_port *ap;
1663 struct ata_queued_cmd *qc;
1664 struct ata_eh_info *ehi;
1665 unsigned int i, err_mask, printed = 0;
1666 u32 err_cause;
1667
1668 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1669
1670 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1671 err_cause);
1672
1673 DPRINTK("All regs @ PCI error\n");
1674 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1675
1676 writelfl(0, mmio + hpriv->irq_cause_ofs);
1677
1678 for (i = 0; i < host->n_ports; i++) {
1679 ap = host->ports[i];
1680 if (!ata_link_offline(&ap->link)) {
1681 ehi = &ap->link.eh_info;
1682 ata_ehi_clear_desc(ehi);
1683 if (!printed++)
1684 ata_ehi_push_desc(ehi,
1685 "PCI err cause 0x%08x", err_cause);
1686 err_mask = AC_ERR_HOST_BUS;
1687 ehi->action = ATA_EH_HARDRESET;
1688 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1689 if (qc)
1690 qc->err_mask |= err_mask;
1691 else
1692 ehi->err_mask |= err_mask;
1693
1694 ata_port_freeze(ap);
1695 }
1696 }
1697 }
1698
1699 /**
1700 * mv_interrupt - Main interrupt event handler
1701 * @irq: unused
1702 * @dev_instance: private data; in this case the host structure
1703 *
1704 * Read the read only register to determine if any host
1705 * controllers have pending interrupts. If so, call lower level
1706 * routine to handle. Also check for PCI errors which are only
1707 * reported here.
1708 *
1709 * LOCKING:
1710 * This routine holds the host lock while processing pending
1711 * interrupts.
1712 */
1713 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1714 {
1715 struct ata_host *host = dev_instance;
1716 unsigned int hc, handled = 0, n_hcs;
1717 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1718 u32 irq_stat;
1719
1720 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1721
1722 /* check the cases where we either have nothing pending or have read
1723 * a bogus register value which can indicate HW removal or PCI fault
1724 */
1725 if (!irq_stat || (0xffffffffU == irq_stat))
1726 return IRQ_NONE;
1727
1728 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1729 spin_lock(&host->lock);
1730
1731 if (unlikely(irq_stat & PCI_ERR)) {
1732 mv_pci_error(host, mmio);
1733 handled = 1;
1734 goto out_unlock; /* skip all other HC irq handling */
1735 }
1736
1737 for (hc = 0; hc < n_hcs; hc++) {
1738 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1739 if (relevant) {
1740 mv_host_intr(host, relevant, hc);
1741 handled = 1;
1742 }
1743 }
1744
1745 out_unlock:
1746 spin_unlock(&host->lock);
1747
1748 return IRQ_RETVAL(handled);
1749 }
1750
1751 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1752 {
1753 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1754 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1755
1756 return hc_mmio + ofs;
1757 }
1758
1759 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1760 {
1761 unsigned int ofs;
1762
1763 switch (sc_reg_in) {
1764 case SCR_STATUS:
1765 case SCR_ERROR:
1766 case SCR_CONTROL:
1767 ofs = sc_reg_in * sizeof(u32);
1768 break;
1769 default:
1770 ofs = 0xffffffffU;
1771 break;
1772 }
1773 return ofs;
1774 }
1775
1776 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1777 {
1778 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1779 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1780 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1781
1782 if (ofs != 0xffffffffU) {
1783 *val = readl(addr + ofs);
1784 return 0;
1785 } else
1786 return -EINVAL;
1787 }
1788
1789 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1790 {
1791 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1792 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1793 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1794
1795 if (ofs != 0xffffffffU) {
1796 writelfl(val, addr + ofs);
1797 return 0;
1798 } else
1799 return -EINVAL;
1800 }
1801
1802 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1803 {
1804 int early_5080;
1805
1806 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1807
1808 if (!early_5080) {
1809 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1810 tmp |= (1 << 0);
1811 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1812 }
1813
1814 mv_reset_pci_bus(pdev, mmio);
1815 }
1816
1817 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1818 {
1819 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1820 }
1821
1822 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1823 void __iomem *mmio)
1824 {
1825 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1826 u32 tmp;
1827
1828 tmp = readl(phy_mmio + MV5_PHY_MODE);
1829
1830 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1831 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1832 }
1833
1834 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1835 {
1836 u32 tmp;
1837
1838 writel(0, mmio + MV_GPIO_PORT_CTL);
1839
1840 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1841
1842 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1843 tmp |= ~(1 << 0);
1844 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1845 }
1846
1847 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1848 unsigned int port)
1849 {
1850 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1851 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1852 u32 tmp;
1853 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1854
1855 if (fix_apm_sq) {
1856 tmp = readl(phy_mmio + MV5_LT_MODE);
1857 tmp |= (1 << 19);
1858 writel(tmp, phy_mmio + MV5_LT_MODE);
1859
1860 tmp = readl(phy_mmio + MV5_PHY_CTL);
1861 tmp &= ~0x3;
1862 tmp |= 0x1;
1863 writel(tmp, phy_mmio + MV5_PHY_CTL);
1864 }
1865
1866 tmp = readl(phy_mmio + MV5_PHY_MODE);
1867 tmp &= ~mask;
1868 tmp |= hpriv->signal[port].pre;
1869 tmp |= hpriv->signal[port].amps;
1870 writel(tmp, phy_mmio + MV5_PHY_MODE);
1871 }
1872
1873
1874 #undef ZERO
1875 #define ZERO(reg) writel(0, port_mmio + (reg))
1876 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1877 unsigned int port)
1878 {
1879 void __iomem *port_mmio = mv_port_base(mmio, port);
1880
1881 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1882
1883 mv_channel_reset(hpriv, mmio, port);
1884
1885 ZERO(0x028); /* command */
1886 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1887 ZERO(0x004); /* timer */
1888 ZERO(0x008); /* irq err cause */
1889 ZERO(0x00c); /* irq err mask */
1890 ZERO(0x010); /* rq bah */
1891 ZERO(0x014); /* rq inp */
1892 ZERO(0x018); /* rq outp */
1893 ZERO(0x01c); /* respq bah */
1894 ZERO(0x024); /* respq outp */
1895 ZERO(0x020); /* respq inp */
1896 ZERO(0x02c); /* test control */
1897 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1898 }
1899 #undef ZERO
1900
1901 #define ZERO(reg) writel(0, hc_mmio + (reg))
1902 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1903 unsigned int hc)
1904 {
1905 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1906 u32 tmp;
1907
1908 ZERO(0x00c);
1909 ZERO(0x010);
1910 ZERO(0x014);
1911 ZERO(0x018);
1912
1913 tmp = readl(hc_mmio + 0x20);
1914 tmp &= 0x1c1c1c1c;
1915 tmp |= 0x03030303;
1916 writel(tmp, hc_mmio + 0x20);
1917 }
1918 #undef ZERO
1919
1920 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1921 unsigned int n_hc)
1922 {
1923 unsigned int hc, port;
1924
1925 for (hc = 0; hc < n_hc; hc++) {
1926 for (port = 0; port < MV_PORTS_PER_HC; port++)
1927 mv5_reset_hc_port(hpriv, mmio,
1928 (hc * MV_PORTS_PER_HC) + port);
1929
1930 mv5_reset_one_hc(hpriv, mmio, hc);
1931 }
1932
1933 return 0;
1934 }
1935
1936 #undef ZERO
1937 #define ZERO(reg) writel(0, mmio + (reg))
1938 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1939 {
1940 struct ata_host *host = dev_get_drvdata(&pdev->dev);
1941 struct mv_host_priv *hpriv = host->private_data;
1942 u32 tmp;
1943
1944 tmp = readl(mmio + MV_PCI_MODE);
1945 tmp &= 0xff00ffff;
1946 writel(tmp, mmio + MV_PCI_MODE);
1947
1948 ZERO(MV_PCI_DISC_TIMER);
1949 ZERO(MV_PCI_MSI_TRIGGER);
1950 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1951 ZERO(HC_MAIN_IRQ_MASK_OFS);
1952 ZERO(MV_PCI_SERR_MASK);
1953 ZERO(hpriv->irq_cause_ofs);
1954 ZERO(hpriv->irq_mask_ofs);
1955 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1956 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1957 ZERO(MV_PCI_ERR_ATTRIBUTE);
1958 ZERO(MV_PCI_ERR_COMMAND);
1959 }
1960 #undef ZERO
1961
1962 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1963 {
1964 u32 tmp;
1965
1966 mv5_reset_flash(hpriv, mmio);
1967
1968 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1969 tmp &= 0x3;
1970 tmp |= (1 << 5) | (1 << 6);
1971 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1972 }
1973
1974 /**
1975 * mv6_reset_hc - Perform the 6xxx global soft reset
1976 * @mmio: base address of the HBA
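 * @hpriv: host private data (not used by this routine)
 * @n_hc: number of host controllers (not used; the reset is global)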
1977 *
1978 * This routine only applies to 6xxx parts.
1979 *
1980 * LOCKING:
1981 * Inherited from caller.
1982 */
1983 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1984 unsigned int n_hc)
1985 {
1986 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1987 int i, rc = 0;
1988 u32 t;
1989
1990 /* Following procedure defined in PCI "main command and status
1991 * register" table.
1992 */
1993 t = readl(reg);
1994 writel(t | STOP_PCI_MASTER, reg);
1995
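/* Wait up to ~1ms for the PCI master to drain before asserting
 * the global soft reset.
 */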
1996 for (i = 0; i < 1000; i++) {
1997 udelay(1);
1998 t = readl(reg);
1999 if (PCI_MASTER_EMPTY & t)
2000 break;
2001 }
2002 if (!(PCI_MASTER_EMPTY & t)) {
2003 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2004 rc = 1;
2005 goto done;
2006 }
2007
2008 /* set reset */
2009 i = 5;
2010 do {
2011 writel(t | GLOB_SFT_RST, reg);
2012 t = readl(reg);
2013 udelay(1);
2014 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2015
2016 if (!(GLOB_SFT_RST & t)) {
2017 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2018 rc = 1;
2019 goto done;
2020 }
2021
2022 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2023 i = 5;
2024 do {
2025 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2026 t = readl(reg);
2027 udelay(1);
2028 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2029
2030 if (GLOB_SFT_RST & t) {
2031 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2032 rc = 1;
2033 }
2034 done:
2035 return rc;
2036 }
2037
2038 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2039 void __iomem *mmio)
2040 {
2041 void __iomem *port_mmio;
2042 u32 tmp;
2043
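/* If MV_RESET_CFG bit 0 is clear, fall back to fixed default
 * amplitude/pre-emphasis values instead of reading them back
 * from PHY_MODE2.
 */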
2044 tmp = readl(mmio + MV_RESET_CFG);
2045 if ((tmp & (1 << 0)) == 0) {
2046 hpriv->signal[idx].amps = 0x7 << 8;
2047 hpriv->signal[idx].pre = 0x1 << 5;
2048 return;
2049 }
2050
2051 port_mmio = mv_port_base(mmio, idx);
2052 tmp = readl(port_mmio + PHY_MODE2);
2053
2054 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2055 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2056 }
2057
2058 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2059 {
2060 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2061 }
2062
2063 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2064 unsigned int port)
2065 {
2066 void __iomem *port_mmio = mv_port_base(mmio, port);
2067
2068 u32 hp_flags = hpriv->hp_flags;
2069 int fix_phy_mode2 =
2070 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2071 int fix_phy_mode4 =
2072 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2073 u32 m2, tmp;
2074
2075 if (fix_phy_mode2) {
2076 m2 = readl(port_mmio + PHY_MODE2);
2077 m2 &= ~(1 << 16);
2078 m2 |= (1 << 31);
2079 writel(m2, port_mmio + PHY_MODE2);
2080
2081 udelay(200);
2082
2083 m2 = readl(port_mmio + PHY_MODE2);
2084 m2 &= ~((1 << 16) | (1 << 31));
2085 writel(m2, port_mmio + PHY_MODE2);
2086
2087 udelay(200);
2088 }
2089
2090 /* who knows what this magic does */
2091 tmp = readl(port_mmio + PHY_MODE3);
2092 tmp &= ~0x7F800000;
2093 tmp |= 0x2A800000;
2094 writel(tmp, port_mmio + PHY_MODE3);
2095
2096 if (fix_phy_mode4) {
2097 u32 m4;
2098
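/* On 60X1B2 parts the register at 0x310 is saved and restored
 * around the PHY_MODE4 update, presumably because the write
 * would otherwise clobber it.
 */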
2099 m4 = readl(port_mmio + PHY_MODE4);
2100
2101 if (hp_flags & MV_HP_ERRATA_60X1B2)
2102 tmp = readl(port_mmio + 0x310);
2103
2104 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2105
2106 writel(m4, port_mmio + PHY_MODE4);
2107
2108 if (hp_flags & MV_HP_ERRATA_60X1B2)
2109 writel(tmp, port_mmio + 0x310);
2110 }
2111
2112 /* Revert values of pre-emphasis and signal amps to the saved ones */
2113 m2 = readl(port_mmio + PHY_MODE2);
2114
2115 m2 &= ~MV_M2_PREAMP_MASK;
2116 m2 |= hpriv->signal[port].amps;
2117 m2 |= hpriv->signal[port].pre;
2118 m2 &= ~(1 << 16);
2119
2120 /* according to mvSata 3.6.1, some IIE values are fixed */
2121 if (IS_GEN_IIE(hpriv)) {
2122 m2 &= ~0xC30FF01F;
2123 m2 |= 0x0000900F;
2124 }
2125
2126 writel(m2, port_mmio + PHY_MODE2);
2127 }
2128
2129 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2130 unsigned int port_no)
2131 {
2132 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2133
2134 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2135
2136 if (IS_GEN_II(hpriv)) {
2137 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2138 ifctl |= (1 << 7); /* enable gen2i speed */
2139 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2140 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2141 }
2142
2143 udelay(25); /* allow reset propagation */
2144
2145 /* Spec never mentions clearing the bit. Marvell's driver does
2146 * clear the bit, however.
2147 */
2148 writelfl(0, port_mmio + EDMA_CMD_OFS);
2149
2150 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2151
2152 if (IS_GEN_I(hpriv))
2153 mdelay(1);
2154 }
2155
2156 /**
2157 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2158 * @ap: ATA channel to manipulate
2159 *
2160 * Part of this is taken from __sata_phy_reset and modified to
2161 * not sleep since this routine gets called from interrupt level.
2162 *
2163 * LOCKING:
2164 * Inherited from caller. This is coded to safe to call at
2165 * interrupt level, i.e. it does not sleep.
2166 */
2167 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2168 unsigned long deadline)
2169 {
2170 struct mv_port_priv *pp = ap->private_data;
2171 struct mv_host_priv *hpriv = ap->host->private_data;
2172 void __iomem *port_mmio = mv_ap_base(ap);
2173 int retry = 5;
2174 u32 sstatus;
2175
2176 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2177
2178 #ifdef DEBUG
2179 {
2180 u32 sstatus, serror, scontrol;
2181
2182 mv_scr_read(ap, SCR_STATUS, &sstatus);
2183 mv_scr_read(ap, SCR_ERROR, &serror);
2184 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2185 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2186 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2187 }
2188 #endif
2189
2190 /* Issue COMRESET via SControl */
2191 comreset_retry:
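/* SControl 0x301: DET=1 (begin COMRESET), IPM=3 (disallow
 * partial/slumber); writing 0x300 afterwards releases DET so
 * the link can renegotiate.
 */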
2192 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2193 msleep(1);
2194
2195 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2196 msleep(20);
2197
2198 do {
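/* DET in the low bits of SStatus: 3 means device present with
 * PHY communication established, 0 means nothing detected --
 * stop waiting in either case.
 */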
2199 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2200 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2201 break;
2202
2203 msleep(1);
2204 } while (time_before(jiffies, deadline));
2205
2206 /* work around errata */
2207 if (IS_GEN_II(hpriv) &&
2208 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2209 (retry-- > 0))
2210 goto comreset_retry;
2211
2212 #ifdef DEBUG
2213 {
2214 u32 sstatus, serror, scontrol;
2215
2216 mv_scr_read(ap, SCR_STATUS, &sstatus);
2217 mv_scr_read(ap, SCR_ERROR, &serror);
2218 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2219 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2220 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2221 }
2222 #endif
2223
2224 if (ata_link_offline(&ap->link)) {
2225 *class = ATA_DEV_NONE;
2226 return;
2227 }
2228
2229 /* Even after SStatus reflects that the device is ready,
2230 * it seems to take a while for the link to be fully
2231 * established (and thus Status no longer 0x80/0x7F),
2232 * so poll briefly for that here.
2233 */
2234 retry = 20;
2235 while (1) {
2236 u8 drv_stat = ata_check_status(ap);
2237 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2238 break;
2239 msleep(500);
2240 if (retry-- <= 0)
2241 break;
2242 if (time_after(jiffies, deadline))
2243 break;
2244 }
2245
2246 /* FIXME: if we passed the deadline, the following
2247 * code probably produces an invalid result
2248 */
2249
2250 /* finally, read device signature from TF registers */
2251 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
2252
2253 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2254
2255 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2256
2257 VPRINTK("EXIT\n");
2258 }
2259
2260 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2261 {
2262 struct ata_port *ap = link->ap;
2263 struct mv_port_priv *pp = ap->private_data;
2264 struct ata_eh_context *ehc = &link->eh_context;
2265 int rc;
2266
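/* Force a hardreset the first time EH runs on this port, and
 * whenever EDMA cannot be stopped cleanly.
 */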
2267 rc = mv_stop_dma(ap);
2268 if (rc)
2269 ehc->i.action |= ATA_EH_HARDRESET;
2270
2271 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2272 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2273 ehc->i.action |= ATA_EH_HARDRESET;
2274 }
2275
2276 /* if we're about to do hardreset, nothing more to do */
2277 if (ehc->i.action & ATA_EH_HARDRESET)
2278 return 0;
2279
2280 if (ata_link_online(link))
2281 rc = ata_wait_ready(ap, deadline);
2282 else
2283 rc = -ENODEV;
2284
2285 return rc;
2286 }
2287
2288 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2289 unsigned long deadline)
2290 {
2291 struct ata_port *ap = link->ap;
2292 struct mv_host_priv *hpriv = ap->host->private_data;
2293 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2294
2295 mv_stop_dma(ap);
2296
2297 mv_channel_reset(hpriv, mmio, ap->port_no);
2298
2299 mv_phy_reset(ap, class, deadline);
2300
2301 return 0;
2302 }
2303
2304 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2305 {
2306 struct ata_port *ap = link->ap;
2307 u32 serr;
2308
2309 /* print link status */
2310 sata_print_link_status(link);
2311
2312 /* clear SError */
2313 sata_scr_read(link, SCR_ERROR, &serr);
2314 sata_scr_write_flush(link, SCR_ERROR, serr);
2315
2316 /* bail out if no device is present */
2317 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2318 DPRINTK("EXIT, no device\n");
2319 return;
2320 }
2321
2322 /* set up device control */
2323 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2324 }
2325
2326 static void mv_error_handler(struct ata_port *ap)
2327 {
2328 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2329 mv_hardreset, mv_postreset);
2330 }
2331
2332 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2333 {
2334 mv_stop_dma(qc->ap);
2335 }
2336
2337 static void mv_eh_freeze(struct ata_port *ap)
2338 {
2339 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2340 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2341 u32 tmp, mask;
2342 unsigned int shift;
2343
2344 /* FIXME: handle coalescing completion events properly */
2345
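/* Each port owns two bits (err, done) in the main IRQ mask;
 * ports on the second host controller sit one extra bit up,
 * apparently to skip a coalescing bit between the HC fields.
 */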
2346 shift = ap->port_no * 2;
2347 if (hc > 0)
2348 shift++;
2349
2350 mask = 0x3 << shift;
2351
2352 /* disable assertion of portN err, done events */
2353 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2354 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2355 }
2356
2357 static void mv_eh_thaw(struct ata_port *ap)
2358 {
2359 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2360 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2361 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2362 void __iomem *port_mmio = mv_ap_base(ap);
2363 u32 tmp, mask, hc_irq_cause;
2364 unsigned int shift, hc_port_no = ap->port_no;
2365
2366 /* FIXME: handle coalescing completion events properly */
2367
2368 shift = ap->port_no * 2;
2369 if (hc > 0) {
2370 shift++;
2371 hc_port_no -= 4;
2372 }
2373
2374 mask = 0x3 << shift;
2375
2376 /* clear EDMA errors on this port */
2377 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2378
2379 /* clear pending irq events */
2380 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2381 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2382 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2383 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2384
2385 /* enable assertion of portN err, done events */
2386 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2387 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2388 }
2389
2390 /**
2391 * mv_port_init - Perform some early initialization on a single port.
2392 * @port: libata data structure storing shadow register addresses
2393 * @port_mmio: base address of the port
2394 *
2395 * Initialize shadow register mmio addresses, clear outstanding
2396 * interrupts on the port, and unmask interrupts for the future
2397 * start of the port.
2398 *
2399 * LOCKING:
2400 * Inherited from caller.
2401 */
2402 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2403 {
2404 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2405 unsigned serr_ofs;
2406
2407 /* PIO related setup
2408 */
2409 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2410 port->error_addr =
2411 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2412 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2413 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2414 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2415 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2416 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2417 port->status_addr =
2418 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2419 /* special case: control/altstatus doesn't have ATA_REG_ address */
2420 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2421
2422 /* unused: */
2423 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2424
2425 /* Clear any currently outstanding port interrupt conditions */
2426 serr_ofs = mv_scr_offset(SCR_ERROR);
2427 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2428 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2429
2430 /* unmask all EDMA error interrupts */
2431 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2432
2433 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2434 readl(port_mmio + EDMA_CFG_OFS),
2435 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2436 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2437 }
2438
2439 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2440 {
2441 struct pci_dev *pdev = to_pci_dev(host->dev);
2442 struct mv_host_priv *hpriv = host->private_data;
2443 u32 hp_flags = hpriv->hp_flags;
2444
2445 switch (board_idx) {
2446 case chip_5080:
2447 hpriv->ops = &mv5xxx_ops;
2448 hp_flags |= MV_HP_GEN_I;
2449
2450 switch (pdev->revision) {
2451 case 0x1:
2452 hp_flags |= MV_HP_ERRATA_50XXB0;
2453 break;
2454 case 0x3:
2455 hp_flags |= MV_HP_ERRATA_50XXB2;
2456 break;
2457 default:
2458 dev_printk(KERN_WARNING, &pdev->dev,
2459 "Applying 50XXB2 workarounds to unknown rev\n");
2460 hp_flags |= MV_HP_ERRATA_50XXB2;
2461 break;
2462 }
2463 break;
2464
2465 case chip_504x:
2466 case chip_508x:
2467 hpriv->ops = &mv5xxx_ops;
2468 hp_flags |= MV_HP_GEN_I;
2469
2470 switch (pdev->revision) {
2471 case 0x0:
2472 hp_flags |= MV_HP_ERRATA_50XXB0;
2473 break;
2474 case 0x3:
2475 hp_flags |= MV_HP_ERRATA_50XXB2;
2476 break;
2477 default:
2478 dev_printk(KERN_WARNING, &pdev->dev,
2479 "Applying B2 workarounds to unknown rev\n");
2480 hp_flags |= MV_HP_ERRATA_50XXB2;
2481 break;
2482 }
2483 break;
2484
2485 case chip_604x:
2486 case chip_608x:
2487 hpriv->ops = &mv6xxx_ops;
2488 hp_flags |= MV_HP_GEN_II;
2489
2490 switch (pdev->revision) {
2491 case 0x7:
2492 hp_flags |= MV_HP_ERRATA_60X1B2;
2493 break;
2494 case 0x9:
2495 hp_flags |= MV_HP_ERRATA_60X1C0;
2496 break;
2497 default:
2498 dev_printk(KERN_WARNING, &pdev->dev,
2499 "Applying B2 workarounds to unknown rev\n");
2500 hp_flags |= MV_HP_ERRATA_60X1B2;
2501 break;
2502 }
2503 break;
2504
2505 case chip_7042:
2506 hp_flags |= MV_HP_PCIE;
2507 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2508 (pdev->device == 0x2300 || pdev->device == 0x2310))
2509 {
2510 /*
2511 * Highpoint RocketRAID PCIe 23xx series cards:
2512 *
2513 * Unconfigured drives are treated as "Legacy"
2514 * by the BIOS, and it overwrites sector 8 with
2515 * a "Lgcy" metadata block prior to Linux boot.
2516 *
2517 * Configured drives (RAID or JBOD) leave sector 8
2518 * alone, but instead overwrite a high numbered
2519 * sector for the RAID metadata. This sector can
2520 * be determined exactly by truncating the physical
2521 * drive capacity to a nice even GB value.
2522 *
2523 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2524 *
2525 * Warn the user, lest they think we're just buggy.
2526 */
2527 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2528 " BIOS CORRUPTS DATA on all attached drives,"
2529 " regardless of if/how they are configured."
2530 " BEWARE!\n");
2531 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2532 " use sectors 8-9 on \"Legacy\" drives,"
2533 " and avoid the final two gigabytes on"
2534 " all RocketRAID BIOS initialized drives.\n");
2535 }
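/* fall through: the 7042 uses the GenIIE setup below */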
2536 case chip_6042:
2537 hpriv->ops = &mv6xxx_ops;
2538 hp_flags |= MV_HP_GEN_IIE;
2539
2540 switch (pdev->revision) {
2541 case 0x0:
2542 hp_flags |= MV_HP_ERRATA_XX42A0;
2543 break;
2544 case 0x1:
2545 hp_flags |= MV_HP_ERRATA_60X1C0;
2546 break;
2547 default:
2548 dev_printk(KERN_WARNING, &pdev->dev,
2549 "Applying 60X1C0 workarounds to unknown rev\n");
2550 hp_flags |= MV_HP_ERRATA_60X1C0;
2551 break;
2552 }
2553 break;
2554
2555 default:
2556 dev_printk(KERN_ERR, &pdev->dev,
2557 "BUG: invalid board index %u\n", board_idx);
2558 return 1;
2559 }
2560
2561 hpriv->hp_flags = hp_flags;
2562 if (hp_flags & MV_HP_PCIE) {
2563 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2564 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2565 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2566 } else {
2567 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2568 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2569 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2570 }
2571
2572 return 0;
2573 }
2574
2575 /**
2576 * mv_init_host - Perform some early initialization of the host.
2577 * @host: ATA host to initialize
2578 * @board_idx: controller index
2579 *
2580 * If possible, do an early global reset of the host. Then do
2581 * our port init and clear/unmask all/relevant host interrupts.
2582 *
2583 * LOCKING:
2584 * Inherited from caller.
2585 */
2586 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2587 {
2588 int rc = 0, n_hc, port, hc;
2589 struct pci_dev *pdev = to_pci_dev(host->dev);
2590 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2591 struct mv_host_priv *hpriv = host->private_data;
2592
2593 /* global interrupt mask */
2594 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2595
2596 rc = mv_chip_id(host, board_idx);
2597 if (rc)
2598 goto done;
2599
2600 n_hc = mv_get_hc_count(host->ports[0]->flags);
2601
2602 for (port = 0; port < host->n_ports; port++)
2603 hpriv->ops->read_preamp(hpriv, port, mmio);
2604
2605 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2606 if (rc)
2607 goto done;
2608
2609 hpriv->ops->reset_flash(hpriv, mmio);
2610 hpriv->ops->reset_bus(pdev, mmio);
2611 hpriv->ops->enable_leds(hpriv, mmio);
2612
2613 for (port = 0; port < host->n_ports; port++) {
2614 if (IS_GEN_II(hpriv)) {
2615 void __iomem *port_mmio = mv_port_base(mmio, port);
2616
2617 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2618 ifctl |= (1 << 7); /* enable gen2i speed */
2619 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2620 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2621 }
2622
2623 hpriv->ops->phy_errata(hpriv, mmio, port);
2624 }
2625
2626 for (port = 0; port < host->n_ports; port++) {
2627 struct ata_port *ap = host->ports[port];
2628 void __iomem *port_mmio = mv_port_base(mmio, port);
2629 unsigned int offset = port_mmio - mmio;
2630
2631 mv_port_init(&ap->ioaddr, port_mmio);
2632
2633 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2634 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2635 }
2636
2637 for (hc = 0; hc < n_hc; hc++) {
2638 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2639
2640 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2641 "(before clear)=0x%08x\n", hc,
2642 readl(hc_mmio + HC_CFG_OFS),
2643 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2644
2645 /* Clear any currently outstanding hc interrupt conditions */
2646 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2647 }
2648
2649 /* Clear any currently outstanding host interrupt conditions */
2650 writelfl(0, mmio + hpriv->irq_cause_ofs);
2651
2652 /* and unmask interrupt generation for host regs */
2653 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2654
2655 if (IS_GEN_I(hpriv))
2656 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2657 else
2658 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2659
2660 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2661 "PCI int cause/mask=0x%08x/0x%08x\n",
2662 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2663 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2664 readl(mmio + hpriv->irq_cause_ofs),
2665 readl(mmio + hpriv->irq_mask_ofs));
2666
2667 done:
2668 return rc;
2669 }
2670
2671 /**
2672 * mv_print_info - Dump key info to kernel log for perusal.
2673 * @host: ATA host to print info about
2674 *
2675 * FIXME: complete this.
2676 *
2677 * LOCKING:
2678 * Inherited from caller.
2679 */
2680 static void mv_print_info(struct ata_host *host)
2681 {
2682 struct pci_dev *pdev = to_pci_dev(host->dev);
2683 struct mv_host_priv *hpriv = host->private_data;
2684 u8 scc;
2685 const char *scc_s, *gen;
2686
2687 /* Read the PCI class code byte so we can report whether the
2688 * part identifies itself as a SCSI or RAID controller.
2689 */
2690 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2691 if (scc == 0)
2692 scc_s = "SCSI";
2693 else if (scc == 0x01)
2694 scc_s = "RAID";
2695 else
2696 scc_s = "?";
2697
2698 if (IS_GEN_I(hpriv))
2699 gen = "I";
2700 else if (IS_GEN_II(hpriv))
2701 gen = "II";
2702 else if (IS_GEN_IIE(hpriv))
2703 gen = "IIE";
2704 else
2705 gen = "?";
2706
2707 dev_printk(KERN_INFO, &pdev->dev,
2708 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2709 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2710 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2711 }
2712
2713 /**
2714 * mv_init_one - handle a positive probe of a Marvell host
2715 * @pdev: PCI device found
2716 * @ent: PCI device ID entry for the matched host
2717 *
2718 * LOCKING:
2719 * Inherited from caller.
2720 */
2721 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2722 {
2723 static int printed_version;
2724 unsigned int board_idx = (unsigned int)ent->driver_data;
2725 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2726 struct ata_host *host;
2727 struct mv_host_priv *hpriv;
2728 int n_ports, rc;
2729
2730 if (!printed_version++)
2731 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2732
2733 /* allocate host */
2734 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2735
2736 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2737 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2738 if (!host || !hpriv)
2739 return -ENOMEM;
2740 host->private_data = hpriv;
2741
2742 /* acquire resources */
2743 rc = pcim_enable_device(pdev);
2744 if (rc)
2745 return rc;
2746
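/* Map only the primary BAR; if it is already claimed (-EBUSY),
 * pin the device so managed cleanup leaves it enabled, then
 * bail out with the error.
 */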
2747 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2748 if (rc == -EBUSY)
2749 pcim_pin_device(pdev);
2750 if (rc)
2751 return rc;
2752 host->iomap = pcim_iomap_table(pdev);
2753
2754 rc = pci_go_64(pdev);
2755 if (rc)
2756 return rc;
2757
2758 /* initialize adapter */
2759 rc = mv_init_host(host, board_idx);
2760 if (rc)
2761 return rc;
2762
2763 /* Enable interrupts */
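/* If MSI was requested but cannot be enabled, explicitly
 * (re)enable legacy INTx instead.
 */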
2764 if (msi && pci_enable_msi(pdev))
2765 pci_intx(pdev, 1);
2766
2767 mv_dump_pci_cfg(pdev, 0x68);
2768 mv_print_info(host);
2769
2770 pci_set_master(pdev);
2771 pci_try_set_mwi(pdev);
2772 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2773 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2774 }
2775
2776 static int __init mv_init(void)
2777 {
2778 return pci_register_driver(&mv_pci_driver);
2779 }
2780
2781 static void __exit mv_exit(void)
2782 {
2783 pci_unregister_driver(&mv_pci_driver);
2784 }
2785
2786 MODULE_AUTHOR("Brett Russ");
2787 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2788 MODULE_LICENSE("GPL");
2789 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2790 MODULE_VERSION(DRV_VERSION);
2791
2792 module_param(msi, int, 0444);
2793 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2794
2795 module_init(mv_init);
2796 module_exit(mv_exit);