1 /*
2 * sata_mv.c - Marvell SATA support
3 *
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
6 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
24 /*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember that a couple of workarounds (one related to
30 PCI-X) are still needed.
31
32 4) Add NCQ support (easy to intermediate, once new-EH support appears)
33
34 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
35
36 6) Add port multiplier support (intermediate)
37
38 8) Develop a low-power-consumption strategy, and implement it.
39
40 9) [Experiment, low priority] See if ATAPI can be supported using
41 "unknown FIS" or "vendor-specific FIS" support, or something creative
42 like that.
43
44 10) [Experiment, low priority] Investigate interrupt coalescing.
45 Quite often, especially with PCI Message Signalled Interrupts (MSI),
46 the overhead reduced by interrupt mitigation is not worth the
47 added latency cost.
48
49 11) [Experiment, Marvell value added] Is it possible to use target
50 mode to cross-connect two Linux boxes with Marvell cards? If so,
51 creating LibATA target mode support would be very interesting.
52
53 Target mode, for those without docs, is the ability to directly
54 connect two SATA controllers.
55
56 13) Verify that 7042 is fully supported. I only have a 6042.
57
58 */
59
60
61 #include <linux/kernel.h>
62 #include <linux/module.h>
63 #include <linux/pci.h>
64 #include <linux/init.h>
65 #include <linux/blkdev.h>
66 #include <linux/delay.h>
67 #include <linux/interrupt.h>
68 #include <linux/dma-mapping.h>
69 #include <linux/device.h>
70 #include <scsi/scsi_host.h>
71 #include <scsi/scsi_cmnd.h>
72 #include <scsi/scsi_device.h>
73 #include <linux/libata.h>
74
75 #define DRV_NAME "sata_mv"
76 #define DRV_VERSION "1.01"
77
78 enum {
79 /* BARs are enumerated in pci_resource_start() terms */
80 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
81 MV_IO_BAR = 2, /* offset 0x18: IO space */
82 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
83
84 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
85 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
86
87 MV_PCI_REG_BASE = 0,
88 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
89 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
90 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
91 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
92 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
93 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
94
95 MV_SATAHC0_REG_BASE = 0x20000,
96 MV_FLASH_CTL = 0x1046c,
97 MV_GPIO_PORT_CTL = 0x104f0,
98 MV_RESET_CFG = 0x180d8,
99
100 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
101 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
102 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
103 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
104
105 MV_MAX_Q_DEPTH = 32,
106 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
107
108 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
109 * CRPB needs alignment on a 256B boundary. Size == 256B
110 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
111 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
112 */
113 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
114 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
115 MV_MAX_SG_CT = 176,
116 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
117 MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
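/* = 1024 + 256 + (176 * 16) = 4096 bytes (4KB) per port, as noted above */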
118
119 MV_PORTS_PER_HC = 4,
120 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
121 MV_PORT_HC_SHIFT = 2,
122 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
123 MV_PORT_MASK = 3,
124
125 /* Host Flags */
126 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
127 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
128 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
129 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
130 ATA_FLAG_PIO_POLLING,
131 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
132
133 CRQB_FLAG_READ = (1 << 0),
134 CRQB_TAG_SHIFT = 1,
135 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
136 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
137 CRQB_CMD_ADDR_SHIFT = 8,
138 CRQB_CMD_CS = (0x2 << 11),
139 CRQB_CMD_LAST = (1 << 15),
140
141 CRPB_FLAG_STATUS_SHIFT = 8,
142 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
143 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
144
145 EPRD_FLAG_END_OF_TBL = (1 << 31),
146
147 /* PCI interface registers */
148
149 PCI_COMMAND_OFS = 0xc00,
150
151 PCI_MAIN_CMD_STS_OFS = 0xd30,
152 STOP_PCI_MASTER = (1 << 2),
153 PCI_MASTER_EMPTY = (1 << 3),
154 GLOB_SFT_RST = (1 << 4),
155
156 MV_PCI_MODE = 0xd00,
157 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
158 MV_PCI_DISC_TIMER = 0xd04,
159 MV_PCI_MSI_TRIGGER = 0xc38,
160 MV_PCI_SERR_MASK = 0xc28,
161 MV_PCI_XBAR_TMOUT = 0x1d04,
162 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
163 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
164 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
165 MV_PCI_ERR_COMMAND = 0x1d50,
166
167 PCI_IRQ_CAUSE_OFS = 0x1d58,
168 PCI_IRQ_MASK_OFS = 0x1d5c,
169 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
170
171 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
172 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
173 PORT0_ERR = (1 << 0), /* shift by port # */
174 PORT0_DONE = (1 << 1), /* shift by port # */
175 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
176 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
177 PCI_ERR = (1 << 18),
178 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
179 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
180 PORTS_0_3_COAL_DONE = (1 << 8),
181 PORTS_4_7_COAL_DONE = (1 << 17),
182 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
183 GPIO_INT = (1 << 22),
184 SELF_INT = (1 << 23),
185 TWSI_INT = (1 << 24),
186 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
187 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
188 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
189 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
190 HC_MAIN_RSVD),
191 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
192 HC_MAIN_RSVD_5),
193
194 /* SATAHC registers */
195 HC_CFG_OFS = 0,
196
197 HC_IRQ_CAUSE_OFS = 0x14,
198 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
199 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
200 DEV_IRQ = (1 << 8), /* shift by port # */
201
202 /* Shadow block registers */
203 SHD_BLK_OFS = 0x100,
204 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
205
206 /* SATA registers */
207 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
208 SATA_ACTIVE_OFS = 0x350,
209 PHY_MODE3 = 0x310,
210 PHY_MODE4 = 0x314,
211 PHY_MODE2 = 0x330,
212 MV5_PHY_MODE = 0x74,
213 MV5_LT_MODE = 0x30,
214 MV5_PHY_CTL = 0x0C,
215 SATA_INTERFACE_CTL = 0x050,
216
217 MV_M2_PREAMP_MASK = 0x7e0,
218
219 /* Port registers */
220 EDMA_CFG_OFS = 0,
221 EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
222 EDMA_CFG_NCQ = (1 << 5),
223 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
224 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
225 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
226
227 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
228 EDMA_ERR_IRQ_MASK_OFS = 0xc,
229 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
230 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
231 EDMA_ERR_DEV = (1 << 2), /* device error */
232 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
233 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
234 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
235 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
236 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
237 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
238 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
239 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
240 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
241 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
242 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
243 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
244 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
245 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
246 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
247 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
248 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
249 EDMA_ERR_OVERRUN_5 = (1 << 5),
250 EDMA_ERR_UNDERRUN_5 = (1 << 6),
251 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
252 EDMA_ERR_PRD_PAR |
253 EDMA_ERR_DEV_DCON |
254 EDMA_ERR_DEV_CON |
255 EDMA_ERR_SERR |
256 EDMA_ERR_SELF_DIS |
257 EDMA_ERR_CRQB_PAR |
258 EDMA_ERR_CRPB_PAR |
259 EDMA_ERR_INTRL_PAR |
260 EDMA_ERR_IORDY |
261 EDMA_ERR_LNK_CTRL_RX_2 |
262 EDMA_ERR_LNK_DATA_RX |
263 EDMA_ERR_LNK_DATA_TX |
264 EDMA_ERR_TRANS_PROTO,
265 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
266 EDMA_ERR_PRD_PAR |
267 EDMA_ERR_DEV_DCON |
268 EDMA_ERR_DEV_CON |
269 EDMA_ERR_OVERRUN_5 |
270 EDMA_ERR_UNDERRUN_5 |
271 EDMA_ERR_SELF_DIS_5 |
272 EDMA_ERR_CRQB_PAR |
273 EDMA_ERR_CRPB_PAR |
274 EDMA_ERR_INTRL_PAR |
275 EDMA_ERR_IORDY,
276
277 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
278 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
279
280 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
281 EDMA_REQ_Q_PTR_SHIFT = 5,
282
283 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
284 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
285 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
286 EDMA_RSP_Q_PTR_SHIFT = 3,
287
288 EDMA_CMD_OFS = 0x28, /* EDMA command register */
289 EDMA_EN = (1 << 0), /* enable EDMA */
290 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
291 ATA_RST = (1 << 2), /* reset trans/link/phy */
292
293 EDMA_IORDY_TMOUT = 0x34,
294 EDMA_ARB_CFG = 0x38,
295
296 /* Host private flags (hp_flags) */
297 MV_HP_FLAG_MSI = (1 << 0),
298 MV_HP_ERRATA_50XXB0 = (1 << 1),
299 MV_HP_ERRATA_50XXB2 = (1 << 2),
300 MV_HP_ERRATA_60X1B2 = (1 << 3),
301 MV_HP_ERRATA_60X1C0 = (1 << 4),
302 MV_HP_ERRATA_XX42A0 = (1 << 5),
303 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
304 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
305 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
306
307 /* Port private flags (pp_flags) */
308 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
309 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
310 };
311
312 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
313 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
314 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
315
316 enum {
317 /* DMA boundary 0xffff is required by the s/g splitting
318 * we need on /length/ in mv_fill_sg().
319 */
320 MV_DMA_BOUNDARY = 0xffffU,
321
322 /* mask of register bits containing lower 32 bits
323 * of EDMA request queue DMA address
324 */
325 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
326
327 /* ditto, for response queue */
328 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
329 };
330
331 enum chip_type {
332 chip_504x,
333 chip_508x,
334 chip_5080,
335 chip_604x,
336 chip_608x,
337 chip_6042,
338 chip_7042,
339 };
340
341 /* Command ReQuest Block: 32B */
342 struct mv_crqb {
343 __le32 sg_addr;
344 __le32 sg_addr_hi;
345 __le16 ctrl_flags;
346 __le16 ata_cmd[11];
347 };
348
349 struct mv_crqb_iie {
350 __le32 addr;
351 __le32 addr_hi;
352 __le32 flags;
353 __le32 len;
354 __le32 ata_cmd[4];
355 };
356
357 /* Command ResPonse Block: 8B */
358 struct mv_crpb {
359 __le16 id;
360 __le16 flags;
361 __le32 tmstmp;
362 };
363
364 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
365 struct mv_sg {
366 __le32 addr;
367 __le32 flags_size;
368 __le32 addr_hi;
369 __le32 reserved;
370 };
371
372 struct mv_port_priv {
373 struct mv_crqb *crqb;
374 dma_addr_t crqb_dma;
375 struct mv_crpb *crpb;
376 dma_addr_t crpb_dma;
377 struct mv_sg *sg_tbl;
378 dma_addr_t sg_tbl_dma;
379
380 unsigned int req_idx;
381 unsigned int resp_idx;
382
383 u32 pp_flags;
384 };
385
386 struct mv_port_signal {
387 u32 amps;
388 u32 pre;
389 };
390
391 struct mv_host_priv;
392 struct mv_hw_ops {
393 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
394 unsigned int port);
395 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
396 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
397 void __iomem *mmio);
398 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
399 unsigned int n_hc);
400 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
401 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
402 };
403
404 struct mv_host_priv {
405 u32 hp_flags;
406 struct mv_port_signal signal[8];
407 const struct mv_hw_ops *ops;
408 };
409
410 static void mv_irq_clear(struct ata_port *ap);
411 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
412 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
413 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
414 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
415 static int mv_port_start(struct ata_port *ap);
416 static void mv_port_stop(struct ata_port *ap);
417 static void mv_qc_prep(struct ata_queued_cmd *qc);
418 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
419 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
420 static void mv_error_handler(struct ata_port *ap);
421 static void mv_post_int_cmd(struct ata_queued_cmd *qc);
422 static void mv_eh_freeze(struct ata_port *ap);
423 static void mv_eh_thaw(struct ata_port *ap);
424 static int mv_slave_config(struct scsi_device *sdev);
425 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
426
427 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
428 unsigned int port);
429 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
430 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
431 void __iomem *mmio);
432 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
433 unsigned int n_hc);
434 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
435 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
436
437 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
438 unsigned int port);
439 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
440 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
441 void __iomem *mmio);
442 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
443 unsigned int n_hc);
444 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
445 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
446 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
447 unsigned int port_no);
448
449 static struct scsi_host_template mv5_sht = {
450 .module = THIS_MODULE,
451 .name = DRV_NAME,
452 .ioctl = ata_scsi_ioctl,
453 .queuecommand = ata_scsi_queuecmd,
454 .can_queue = ATA_DEF_QUEUE,
455 .this_id = ATA_SHT_THIS_ID,
456 .sg_tablesize = MV_MAX_SG_CT / 2,
457 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
458 .emulated = ATA_SHT_EMULATED,
459 .use_clustering = 1,
460 .proc_name = DRV_NAME,
461 .dma_boundary = MV_DMA_BOUNDARY,
462 .slave_configure = mv_slave_config,
463 .slave_destroy = ata_scsi_slave_destroy,
464 .bios_param = ata_std_bios_param,
465 };
466
467 static struct scsi_host_template mv6_sht = {
468 .module = THIS_MODULE,
469 .name = DRV_NAME,
470 .ioctl = ata_scsi_ioctl,
471 .queuecommand = ata_scsi_queuecmd,
472 .can_queue = ATA_DEF_QUEUE,
473 .this_id = ATA_SHT_THIS_ID,
474 .sg_tablesize = MV_MAX_SG_CT / 2,
475 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
476 .emulated = ATA_SHT_EMULATED,
477 .use_clustering = 1,
478 .proc_name = DRV_NAME,
479 .dma_boundary = MV_DMA_BOUNDARY,
480 .slave_configure = mv_slave_config,
481 .slave_destroy = ata_scsi_slave_destroy,
482 .bios_param = ata_std_bios_param,
483 };
484
485 static const struct ata_port_operations mv5_ops = {
486 .port_disable = ata_port_disable,
487
488 .tf_load = ata_tf_load,
489 .tf_read = ata_tf_read,
490 .check_status = ata_check_status,
491 .exec_command = ata_exec_command,
492 .dev_select = ata_std_dev_select,
493
494 .cable_detect = ata_cable_sata,
495
496 .qc_prep = mv_qc_prep,
497 .qc_issue = mv_qc_issue,
498 .data_xfer = ata_data_xfer,
499
500 .irq_clear = mv_irq_clear,
501 .irq_on = ata_irq_on,
502
503 .error_handler = mv_error_handler,
504 .post_internal_cmd = mv_post_int_cmd,
505 .freeze = mv_eh_freeze,
506 .thaw = mv_eh_thaw,
507
508 .scr_read = mv5_scr_read,
509 .scr_write = mv5_scr_write,
510
511 .port_start = mv_port_start,
512 .port_stop = mv_port_stop,
513 };
514
515 static const struct ata_port_operations mv6_ops = {
516 .port_disable = ata_port_disable,
517
518 .tf_load = ata_tf_load,
519 .tf_read = ata_tf_read,
520 .check_status = ata_check_status,
521 .exec_command = ata_exec_command,
522 .dev_select = ata_std_dev_select,
523
524 .cable_detect = ata_cable_sata,
525
526 .qc_prep = mv_qc_prep,
527 .qc_issue = mv_qc_issue,
528 .data_xfer = ata_data_xfer,
529
530 .irq_clear = mv_irq_clear,
531 .irq_on = ata_irq_on,
532
533 .error_handler = mv_error_handler,
534 .post_internal_cmd = mv_post_int_cmd,
535 .freeze = mv_eh_freeze,
536 .thaw = mv_eh_thaw,
537
538 .scr_read = mv_scr_read,
539 .scr_write = mv_scr_write,
540
541 .port_start = mv_port_start,
542 .port_stop = mv_port_stop,
543 };
544
545 static const struct ata_port_operations mv_iie_ops = {
546 .port_disable = ata_port_disable,
547
548 .tf_load = ata_tf_load,
549 .tf_read = ata_tf_read,
550 .check_status = ata_check_status,
551 .exec_command = ata_exec_command,
552 .dev_select = ata_std_dev_select,
553
554 .cable_detect = ata_cable_sata,
555
556 .qc_prep = mv_qc_prep_iie,
557 .qc_issue = mv_qc_issue,
558 .data_xfer = ata_data_xfer,
559
560 .irq_clear = mv_irq_clear,
561 .irq_on = ata_irq_on,
562
563 .error_handler = mv_error_handler,
564 .post_internal_cmd = mv_post_int_cmd,
565 .freeze = mv_eh_freeze,
566 .thaw = mv_eh_thaw,
567
568 .scr_read = mv_scr_read,
569 .scr_write = mv_scr_write,
570
571 .port_start = mv_port_start,
572 .port_stop = mv_port_stop,
573 };
574
575 static const struct ata_port_info mv_port_info[] = {
576 { /* chip_504x */
577 .flags = MV_COMMON_FLAGS,
578 .pio_mask = 0x1f, /* pio0-4 */
579 .udma_mask = ATA_UDMA6,
580 .port_ops = &mv5_ops,
581 },
582 { /* chip_508x */
583 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
584 .pio_mask = 0x1f, /* pio0-4 */
585 .udma_mask = ATA_UDMA6,
586 .port_ops = &mv5_ops,
587 },
588 { /* chip_5080 */
589 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
590 .pio_mask = 0x1f, /* pio0-4 */
591 .udma_mask = ATA_UDMA6,
592 .port_ops = &mv5_ops,
593 },
594 { /* chip_604x */
595 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
596 .pio_mask = 0x1f, /* pio0-4 */
597 .udma_mask = ATA_UDMA6,
598 .port_ops = &mv6_ops,
599 },
600 { /* chip_608x */
601 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
602 MV_FLAG_DUAL_HC,
603 .pio_mask = 0x1f, /* pio0-4 */
604 .udma_mask = ATA_UDMA6,
605 .port_ops = &mv6_ops,
606 },
607 { /* chip_6042 */
608 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
609 .pio_mask = 0x1f, /* pio0-4 */
610 .udma_mask = ATA_UDMA6,
611 .port_ops = &mv_iie_ops,
612 },
613 { /* chip_7042 */
614 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
615 .pio_mask = 0x1f, /* pio0-4 */
616 .udma_mask = ATA_UDMA6,
617 .port_ops = &mv_iie_ops,
618 },
619 };
620
621 static const struct pci_device_id mv_pci_tbl[] = {
622 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
623 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
624 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
625 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
626 /* RocketRAID 1740/174x have different identifiers */
627 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
628 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
629
630 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
631 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
632 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
633 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
634 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
635
636 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
637
638 /* Adaptec 1430SA */
639 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
640
641 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
642
643 /* add Marvell 7042 support */
644 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
645
646 { } /* terminate list */
647 };
648
649 static struct pci_driver mv_pci_driver = {
650 .name = DRV_NAME,
651 .id_table = mv_pci_tbl,
652 .probe = mv_init_one,
653 .remove = ata_pci_remove_one,
654 };
655
656 static const struct mv_hw_ops mv5xxx_ops = {
657 .phy_errata = mv5_phy_errata,
658 .enable_leds = mv5_enable_leds,
659 .read_preamp = mv5_read_preamp,
660 .reset_hc = mv5_reset_hc,
661 .reset_flash = mv5_reset_flash,
662 .reset_bus = mv5_reset_bus,
663 };
664
665 static const struct mv_hw_ops mv6xxx_ops = {
666 .phy_errata = mv6_phy_errata,
667 .enable_leds = mv6_enable_leds,
668 .read_preamp = mv6_read_preamp,
669 .reset_hc = mv6_reset_hc,
670 .reset_flash = mv6_reset_flash,
671 .reset_bus = mv_reset_pci_bus,
672 };
673
674 /*
675 * module options
676 */
677 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
678
679
680 /* move to PCI layer or libata core? */
681 static int pci_go_64(struct pci_dev *pdev)
682 {
683 int rc;
684
685 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
686 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
687 if (rc) {
688 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
689 if (rc) {
690 dev_printk(KERN_ERR, &pdev->dev,
691 "64-bit DMA enable failed\n");
692 return rc;
693 }
694 }
695 } else {
696 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
697 if (rc) {
698 dev_printk(KERN_ERR, &pdev->dev,
699 "32-bit DMA enable failed\n");
700 return rc;
701 }
702 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
703 if (rc) {
704 dev_printk(KERN_ERR, &pdev->dev,
705 "32-bit consistent DMA enable failed\n");
706 return rc;
707 }
708 }
709
710 return rc;
711 }
712
713 /*
714 * Functions
715 */
716
717 static inline void writelfl(unsigned long data, void __iomem *addr)
718 {
719 writel(data, addr);
720 (void) readl(addr); /* flush to avoid PCI posted write */
721 }
722
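/* Register address math used by the helpers below (derived from the
 * constants above, not from vendor documentation): each SATA host
 * controller (HC) occupies a 64KB window starting at MV_SATAHC0_REG_BASE;
 * the first 8KB of that window is the arbiter region, and each of the
 * HC's four hard ports then gets its own 8KB block.  For example, port 5
 * maps to HC 1, hard port 1, i.e.
 * base + 0x20000 + 0x10000 + 0x2000 + 0x2000 = base + 0x34000.
 */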
723 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
724 {
725 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
726 }
727
728 static inline unsigned int mv_hc_from_port(unsigned int port)
729 {
730 return port >> MV_PORT_HC_SHIFT;
731 }
732
733 static inline unsigned int mv_hardport_from_port(unsigned int port)
734 {
735 return port & MV_PORT_MASK;
736 }
737
738 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
739 unsigned int port)
740 {
741 return mv_hc_base(base, mv_hc_from_port(port));
742 }
743
744 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
745 {
746 return mv_hc_base_from_port(base, port) +
747 MV_SATAHC_ARBTR_REG_SZ +
748 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
749 }
750
751 static inline void __iomem *mv_ap_base(struct ata_port *ap)
752 {
753 return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
754 }
755
756 static inline int mv_get_hc_count(unsigned long port_flags)
757 {
758 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
759 }
760
761 static void mv_irq_clear(struct ata_port *ap)
762 {
763 }
764
765 static int mv_slave_config(struct scsi_device *sdev)
766 {
767 int rc = ata_scsi_slave_config(sdev);
768 if (rc)
769 return rc;
770
771 blk_queue_max_phys_segments(sdev->request_queue, MV_MAX_SG_CT / 2);
772
773 return 0; /* scsi layer doesn't check return value, sigh */
774 }
775
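/* Note on the queue pointer registers programmed below: per the masks and
 * shifts defined above, the IN/OUT pointer registers also carry the low
 * bits of the queue base address, so the 5-bit ring index is written into
 * bits 9:5 for the request queue (32 x 32-byte CRQBs, 1KB-aligned) and
 * bits 7:3 for the response queue (32 x 8-byte CRPBs, 256B-aligned).
 */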
776 static void mv_set_edma_ptrs(void __iomem *port_mmio,
777 struct mv_host_priv *hpriv,
778 struct mv_port_priv *pp)
779 {
780 u32 index;
781
782 /*
783 * initialize request queue
784 */
785 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
786
787 WARN_ON(pp->crqb_dma & 0x3ff);
788 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
789 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
790 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
791
792 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
793 writelfl((pp->crqb_dma & 0xffffffff) | index,
794 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
795 else
796 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
797
798 /*
799 * initialize response queue
800 */
801 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
802
803 WARN_ON(pp->crpb_dma & 0xff);
804 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
805
806 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
807 writelfl((pp->crpb_dma & 0xffffffff) | index,
808 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
809 else
810 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
811
812 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
813 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
814 }
815
816 /**
817 * mv_start_dma - Enable eDMA engine
818 * @base: port base address
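* @hpriv: host private data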
819 * @pp: port private data
820 *
821 * Verify the local cache of the eDMA state is accurate with a
822 * WARN_ON.
823 *
824 * LOCKING:
825 * Inherited from caller.
826 */
827 static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
828 struct mv_port_priv *pp)
829 {
830 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
831 /* clear EDMA event indicators, if any */
832 writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);
833
834 mv_set_edma_ptrs(base, hpriv, pp);
835
836 writelfl(EDMA_EN, base + EDMA_CMD_OFS);
837 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
838 }
839 WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
840 }
841
842 /**
843 * __mv_stop_dma - Disable eDMA engine
844 * @ap: ATA channel to manipulate
845 *
846 * Verify the local cache of the eDMA state is accurate with a
847 * WARN_ON.
848 *
849 * LOCKING:
850 * Inherited from caller.
851 */
852 static int __mv_stop_dma(struct ata_port *ap)
853 {
854 void __iomem *port_mmio = mv_ap_base(ap);
855 struct mv_port_priv *pp = ap->private_data;
856 u32 reg;
857 int i, err = 0;
858
859 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
860 /* Disable EDMA if active. The disable bit auto clears.
861 */
862 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
863 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
864 } else {
865 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
866 }
867
868 /* now properly wait for the eDMA to stop */
869 for (i = 1000; i > 0; i--) {
870 reg = readl(port_mmio + EDMA_CMD_OFS);
871 if (!(reg & EDMA_EN))
872 break;
873
874 udelay(100);
875 }
876
877 if (reg & EDMA_EN) {
878 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
879 err = -EIO;
880 }
881
882 return err;
883 }
884
885 static int mv_stop_dma(struct ata_port *ap)
886 {
887 unsigned long flags;
888 int rc;
889
890 spin_lock_irqsave(&ap->host->lock, flags);
891 rc = __mv_stop_dma(ap);
892 spin_unlock_irqrestore(&ap->host->lock, flags);
893
894 return rc;
895 }
896
897 #ifdef ATA_DEBUG
898 static void mv_dump_mem(void __iomem *start, unsigned bytes)
899 {
900 int b, w;
901 for (b = 0; b < bytes; ) {
902 DPRINTK("%p: ", start + b);
903 for (w = 0; b < bytes && w < 4; w++) {
904 printk("%08x ",readl(start + b));
905 b += sizeof(u32);
906 }
907 printk("\n");
908 }
909 }
910 #endif
911
912 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
913 {
914 #ifdef ATA_DEBUG
915 int b, w;
916 u32 dw;
917 for (b = 0; b < bytes; ) {
918 DPRINTK("%02x: ", b);
919 for (w = 0; b < bytes && w < 4; w++) {
920 (void) pci_read_config_dword(pdev,b,&dw);
921 printk("%08x ",dw);
922 b += sizeof(u32);
923 }
924 printk("\n");
925 }
926 #endif
927 }
928 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
929 struct pci_dev *pdev)
930 {
931 #ifdef ATA_DEBUG
932 void __iomem *hc_base = mv_hc_base(mmio_base,
933 port >> MV_PORT_HC_SHIFT);
934 void __iomem *port_base;
935 int start_port, num_ports, p, start_hc, num_hcs, hc;
936
937 if (0 > port) {
938 start_hc = start_port = 0;
939 num_ports = 8; /* should be benign for 4-port devices */
940 num_hcs = 2;
941 } else {
942 start_hc = port >> MV_PORT_HC_SHIFT;
943 start_port = port;
944 num_ports = num_hcs = 1;
945 }
946 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
947 num_ports > 1 ? num_ports - 1 : start_port);
948
949 if (NULL != pdev) {
950 DPRINTK("PCI config space regs:\n");
951 mv_dump_pci_cfg(pdev, 0x68);
952 }
953 DPRINTK("PCI regs:\n");
954 mv_dump_mem(mmio_base+0xc00, 0x3c);
955 mv_dump_mem(mmio_base+0xd00, 0x34);
956 mv_dump_mem(mmio_base+0xf00, 0x4);
957 mv_dump_mem(mmio_base+0x1d00, 0x6c);
958 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
959 hc_base = mv_hc_base(mmio_base, hc);
960 DPRINTK("HC regs (HC %i):\n", hc);
961 mv_dump_mem(hc_base, 0x1c);
962 }
963 for (p = start_port; p < start_port + num_ports; p++) {
964 port_base = mv_port_base(mmio_base, p);
965 DPRINTK("EDMA regs (port %i):\n",p);
966 mv_dump_mem(port_base, 0x54);
967 DPRINTK("SATA regs (port %i):\n",p);
968 mv_dump_mem(port_base+0x300, 0x60);
969 }
970 #endif
971 }
972
973 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
974 {
975 unsigned int ofs;
976
977 switch (sc_reg_in) {
978 case SCR_STATUS:
979 case SCR_CONTROL:
980 case SCR_ERROR:
981 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
982 break;
983 case SCR_ACTIVE:
984 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
985 break;
986 default:
987 ofs = 0xffffffffU;
988 break;
989 }
990 return ofs;
991 }
992
993 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
994 {
995 unsigned int ofs = mv_scr_offset(sc_reg_in);
996
997 if (ofs != 0xffffffffU) {
998 *val = readl(mv_ap_base(ap) + ofs);
999 return 0;
1000 } else
1001 return -EINVAL;
1002 }
1003
1004 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1005 {
1006 unsigned int ofs = mv_scr_offset(sc_reg_in);
1007
1008 if (ofs != 0xffffffffU) {
1009 writelfl(val, mv_ap_base(ap) + ofs);
1010 return 0;
1011 } else
1012 return -EINVAL;
1013 }
1014
1015 static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
1016 void __iomem *port_mmio)
1017 {
1018 u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
1019
1020 /* set up non-NCQ EDMA configuration */
1021 cfg &= ~(1 << 9); /* disable eQue */
1022
1023 if (IS_GEN_I(hpriv)) {
1024 cfg &= ~0x1f; /* clear queue depth */
1025 cfg |= (1 << 8); /* enab config burst size mask */
1026 }
1027
1028 else if (IS_GEN_II(hpriv)) {
1029 cfg &= ~0x1f; /* clear queue depth */
1030 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1031 cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
1032 }
1033
1034 else if (IS_GEN_IIE(hpriv)) {
1035 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1036 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1037 cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
1038 cfg |= (1 << 18); /* enab early completion */
1039 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1040 cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
1041 cfg &= ~(EDMA_CFG_NCQ); /* clear NCQ */
1042 }
1043
1044 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1045 }
1046
1047 /**
1048 * mv_port_start - Port specific init/start routine.
1049 * @ap: ATA channel to manipulate
1050 *
1051 * Allocate and point to DMA memory, init port private memory,
1052 * zero indices.
1053 *
1054 * LOCKING:
1055 * Inherited from caller.
1056 */
1057 static int mv_port_start(struct ata_port *ap)
1058 {
1059 struct device *dev = ap->host->dev;
1060 struct mv_host_priv *hpriv = ap->host->private_data;
1061 struct mv_port_priv *pp;
1062 void __iomem *port_mmio = mv_ap_base(ap);
1063 void *mem;
1064 dma_addr_t mem_dma;
1065 unsigned long flags;
1066 int rc;
1067
1068 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1069 if (!pp)
1070 return -ENOMEM;
1071
1072 mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
1073 GFP_KERNEL);
1074 if (!mem)
1075 return -ENOMEM;
1076 memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
1077
1078 rc = ata_pad_alloc(ap, dev);
1079 if (rc)
1080 return rc;
1081
1082 /* First item in chunk of DMA memory:
1083 * 32-slot command request table (CRQB), 32 bytes each in size
1084 */
1085 pp->crqb = mem;
1086 pp->crqb_dma = mem_dma;
1087 mem += MV_CRQB_Q_SZ;
1088 mem_dma += MV_CRQB_Q_SZ;
1089
1090 /* Second item:
1091 * 32-slot command response table (CRPB), 8 bytes each in size
1092 */
1093 pp->crpb = mem;
1094 pp->crpb_dma = mem_dma;
1095 mem += MV_CRPB_Q_SZ;
1096 mem_dma += MV_CRPB_Q_SZ;
1097
1098 /* Third item:
1099 * Table of scatter-gather descriptors (ePRD), 16 bytes each
1100 */
1101 pp->sg_tbl = mem;
1102 pp->sg_tbl_dma = mem_dma;
1103
1104 spin_lock_irqsave(&ap->host->lock, flags);
1105
1106 mv_edma_cfg(ap, hpriv, port_mmio);
1107
1108 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1109
1110 spin_unlock_irqrestore(&ap->host->lock, flags);
1111
1112 /* Don't turn on EDMA here...do it before DMA commands only. Else
1113 * we'll be unable to send non-data, PIO, etc due to restricted access
1114 * to shadow regs.
1115 */
1116 ap->private_data = pp;
1117 return 0;
1118 }
1119
1120 /**
1121 * mv_port_stop - Port specific cleanup/stop routine.
1122 * @ap: ATA channel to manipulate
1123 *
1124 * Stop DMA, cleanup port memory.
1125 *
1126 * LOCKING:
1127 * This routine uses the host lock to protect the DMA stop.
1128 */
1129 static void mv_port_stop(struct ata_port *ap)
1130 {
1131 mv_stop_dma(ap);
1132 }
1133
1134 /**
1135 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1136 * @qc: queued command whose SG list to source from
1137 *
1138 * Populate the SG list and mark the last entry.
1139 *
1140 * LOCKING:
1141 * Inherited from caller.
1142 */
1143 static void mv_fill_sg(struct ata_queued_cmd *qc)
1144 {
1145 struct mv_port_priv *pp = qc->ap->private_data;
1146 struct scatterlist *sg;
1147 struct mv_sg *mv_sg;
1148
1149 mv_sg = pp->sg_tbl;
1150 ata_for_each_sg(sg, qc) {
1151 dma_addr_t addr = sg_dma_address(sg);
1152 u32 sg_len = sg_dma_len(sg);
1153
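/* The ePRD length field is only 16 bits wide, so split this segment
 * wherever it would cross a 64KB region; each pass through the loop
 * below emits one ePRD entry.
 */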
1154 while (sg_len) {
1155 u32 offset = addr & 0xffff;
1156 u32 len = sg_len;
1157
1158 if (offset + sg_len > 0x10000)
1159 len = 0x10000 - offset;
1160
1161 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1162 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1163 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1164
1165 sg_len -= len;
1166 addr += len;
1167
1168 if (!sg_len && ata_sg_is_last(sg, qc))
1169 mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1170
1171 mv_sg++;
1172 }
1173
1174 }
1175 }
1176
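/* Each 16-bit word in the CRQB ata_cmd[] array encodes one ATA register
 * write, per the shifts/flags defined above: bits 7:0 hold the data byte,
 * bits 10:8 the register address, bits 12:11 the CS field (0x2), and
 * bit 15 marks the last word of the command.
 */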
1177 static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1178 {
1179 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1180 (last ? CRQB_CMD_LAST : 0);
1181 *cmdw = cpu_to_le16(tmp);
1182 }
1183
1184 /**
1185 * mv_qc_prep - Host specific command preparation.
1186 * @qc: queued command to prepare
1187 *
1188 * This routine simply redirects to the general purpose routine
1189 * if command is not DMA. Else, it handles prep of the CRQB
1190 * (command request block), does some sanity checking, and calls
1191 * the SG load routine.
1192 *
1193 * LOCKING:
1194 * Inherited from caller.
1195 */
1196 static void mv_qc_prep(struct ata_queued_cmd *qc)
1197 {
1198 struct ata_port *ap = qc->ap;
1199 struct mv_port_priv *pp = ap->private_data;
1200 __le16 *cw;
1201 struct ata_taskfile *tf;
1202 u16 flags = 0;
1203 unsigned in_index;
1204
1205 if (qc->tf.protocol != ATA_PROT_DMA)
1206 return;
1207
1208 /* Fill in command request block
1209 */
1210 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1211 flags |= CRQB_FLAG_READ;
1212 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1213 flags |= qc->tag << CRQB_TAG_SHIFT;
1214 flags |= qc->tag << CRQB_IOID_SHIFT; /* 50xx appears to ignore this*/
1215
1216 /* get current queue index from software */
1217 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1218
1219 pp->crqb[in_index].sg_addr =
1220 cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1221 pp->crqb[in_index].sg_addr_hi =
1222 cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1223 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1224
1225 cw = &pp->crqb[in_index].ata_cmd[0];
1226 tf = &qc->tf;
1227
1228 /* Sadly, the CRQB cannot accommodate all registers--there are
1229 * only 11 command words...so we must pick and choose required
1230 * registers based on the command. So, we drop feature and
1231 * hob_feature for [RW] DMA commands, but they are needed for
1232 * NCQ. NCQ will drop hob_nsect.
1233 */
1234 switch (tf->command) {
1235 case ATA_CMD_READ:
1236 case ATA_CMD_READ_EXT:
1237 case ATA_CMD_WRITE:
1238 case ATA_CMD_WRITE_EXT:
1239 case ATA_CMD_WRITE_FUA_EXT:
1240 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1241 break;
1242 #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
1243 case ATA_CMD_FPDMA_READ:
1244 case ATA_CMD_FPDMA_WRITE:
1245 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1246 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1247 break;
1248 #endif /* FIXME: remove this line when NCQ added */
1249 default:
1250 /* The only other commands EDMA supports in non-queued and
1251 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1252 * of which are defined/used by Linux. If we get here, this
1253 * driver needs work.
1254 *
1255 * FIXME: modify libata to give qc_prep a return value and
1256 * return error here.
1257 */
1258 BUG_ON(tf->command);
1259 break;
1260 }
1261 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1262 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1263 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1264 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1265 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1266 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1267 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1268 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1269 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1270
1271 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1272 return;
1273 mv_fill_sg(qc);
1274 }
1275
1276 /**
1277 * mv_qc_prep_iie - Host specific command preparation.
1278 * @qc: queued command to prepare
1279 *
1280 * This routine simply redirects to the general purpose routine
1281 * if command is not DMA. Else, it handles prep of the CRQB
1282 * (command request block), does some sanity checking, and calls
1283 * the SG load routine.
1284 *
1285 * LOCKING:
1286 * Inherited from caller.
1287 */
1288 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1289 {
1290 struct ata_port *ap = qc->ap;
1291 struct mv_port_priv *pp = ap->private_data;
1292 struct mv_crqb_iie *crqb;
1293 struct ata_taskfile *tf;
1294 unsigned in_index;
1295 u32 flags = 0;
1296
1297 if (qc->tf.protocol != ATA_PROT_DMA)
1298 return;
1299
1300 /* Fill in Gen IIE command request block
1301 */
1302 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1303 flags |= CRQB_FLAG_READ;
1304
1305 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1306 flags |= qc->tag << CRQB_TAG_SHIFT;
1307 flags |= qc->tag << CRQB_IOID_SHIFT; /* "I/O Id" is -really-
1308 what we use as our tag */
1309
1310 /* get current queue index from software */
1311 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1312
1313 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1314 crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
1315 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
1316 crqb->flags = cpu_to_le32(flags);
1317
1318 tf = &qc->tf;
1319 crqb->ata_cmd[0] = cpu_to_le32(
1320 (tf->command << 16) |
1321 (tf->feature << 24)
1322 );
1323 crqb->ata_cmd[1] = cpu_to_le32(
1324 (tf->lbal << 0) |
1325 (tf->lbam << 8) |
1326 (tf->lbah << 16) |
1327 (tf->device << 24)
1328 );
1329 crqb->ata_cmd[2] = cpu_to_le32(
1330 (tf->hob_lbal << 0) |
1331 (tf->hob_lbam << 8) |
1332 (tf->hob_lbah << 16) |
1333 (tf->hob_feature << 24)
1334 );
1335 crqb->ata_cmd[3] = cpu_to_le32(
1336 (tf->nsect << 0) |
1337 (tf->hob_nsect << 8)
1338 );
1339
1340 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1341 return;
1342 mv_fill_sg(qc);
1343 }
1344
1345 /**
1346 * mv_qc_issue - Initiate a command to the host
1347 * @qc: queued command to start
1348 *
1349 * This routine simply redirects to the general purpose routine
1350 * if command is not DMA. Else, it sanity checks our local
1351 * caches of the request producer/consumer indices then enables
1352 * DMA and bumps the request producer index.
1353 *
1354 * LOCKING:
1355 * Inherited from caller.
1356 */
1357 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1358 {
1359 struct ata_port *ap = qc->ap;
1360 void __iomem *port_mmio = mv_ap_base(ap);
1361 struct mv_port_priv *pp = ap->private_data;
1362 struct mv_host_priv *hpriv = ap->host->private_data;
1363 u32 in_index;
1364
1365 if (qc->tf.protocol != ATA_PROT_DMA) {
1366 /* We're about to send a non-EDMA capable command to the
1367 * port. Turn off EDMA so there won't be problems accessing
1368 * shadow block, etc registers.
1369 */
1370 __mv_stop_dma(ap);
1371 return ata_qc_issue_prot(qc);
1372 }
1373
1374 mv_start_dma(port_mmio, hpriv, pp);
1375
1376 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1377
1378 /* until we do queuing, the queue should be empty at this point */
1379 WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
1380 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
1381
1382 pp->req_idx++;
1383
1384 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1385
1386 /* and write the request in pointer to kick the EDMA to life */
1387 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1388 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1389
1390 return 0;
1391 }
1392
1393 /**
1394 * mv_err_intr - Handle error interrupts on the port
1395 * @ap: ATA channel to manipulate
1396 * @qc: affected queued command, or NULL if none is active
1397 *
1398 * In most cases, just clear the interrupt and move on. However,
1399 * some cases require an eDMA reset, which is done right before
1400 * the COMRESET in mv_phy_reset(). The SERR case requires a
1401 * clear of pending errors in the SATA SERROR register. Finally,
1402 * if the port disabled DMA, update our cached copy to match.
1403 *
1404 * LOCKING:
1405 * Inherited from caller.
1406 */
1407 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1408 {
1409 void __iomem *port_mmio = mv_ap_base(ap);
1410 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1411 struct mv_port_priv *pp = ap->private_data;
1412 struct mv_host_priv *hpriv = ap->host->private_data;
1413 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1414 unsigned int action = 0, err_mask = 0;
1415 struct ata_eh_info *ehi = &ap->link.eh_info;
1416
1417 ata_ehi_clear_desc(ehi);
1418
1419 if (!edma_enabled) {
1420 /* just a guess: do we need to do this? should we
1421 * expand this, and do it in all cases?
1422 */
1423 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1424 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1425 }
1426
1427 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1428
1429 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1430
1431 /*
1432 * all generations share these EDMA error cause bits
1433 */
1434
1435 if (edma_err_cause & EDMA_ERR_DEV)
1436 err_mask |= AC_ERR_DEV;
1437 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1438 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1439 EDMA_ERR_INTRL_PAR)) {
1440 err_mask |= AC_ERR_ATA_BUS;
1441 action |= ATA_EH_HARDRESET;
1442 ata_ehi_push_desc(ehi, "parity error");
1443 }
1444 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1445 ata_ehi_hotplugged(ehi);
1446 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1447 "dev disconnect" : "dev connect");
1448 }
1449
1450 if (IS_GEN_I(hpriv)) {
1451 eh_freeze_mask = EDMA_EH_FREEZE_5;
1452
1453 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1454 struct mv_port_priv *pp = ap->private_data;
1455 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1456 ata_ehi_push_desc(ehi, "EDMA self-disable");
1457 }
1458 } else {
1459 eh_freeze_mask = EDMA_EH_FREEZE;
1460
1461 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1462 struct mv_port_priv *pp = ap->private_data;
1463 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1464 ata_ehi_push_desc(ehi, "EDMA self-disable");
1465 }
1466
1467 if (edma_err_cause & EDMA_ERR_SERR) {
1468 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1469 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1470 err_mask = AC_ERR_ATA_BUS;
1471 action |= ATA_EH_HARDRESET;
1472 }
1473 }
1474
1475 /* Clear EDMA now that SERR cleanup done */
1476 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1477
1478 if (!err_mask) {
1479 err_mask = AC_ERR_OTHER;
1480 action |= ATA_EH_HARDRESET;
1481 }
1482
1483 ehi->serror |= serr;
1484 ehi->action |= action;
1485
1486 if (qc)
1487 qc->err_mask |= err_mask;
1488 else
1489 ehi->err_mask |= err_mask;
1490
1491 if (edma_err_cause & eh_freeze_mask)
1492 ata_port_freeze(ap);
1493 else
1494 ata_port_abort(ap);
1495 }
1496
1497 static void mv_intr_pio(struct ata_port *ap)
1498 {
1499 struct ata_queued_cmd *qc;
1500 u8 ata_status;
1501
1502 /* ignore spurious intr if drive still BUSY */
1503 ata_status = readb(ap->ioaddr.status_addr);
1504 if (unlikely(ata_status & ATA_BUSY))
1505 return;
1506
1507 /* get active ATA command */
1508 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1509 if (unlikely(!qc)) /* no active tag */
1510 return;
1511 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1512 return;
1513
1514 /* and finally, complete the ATA command */
1515 qc->err_mask |= ac_err_mask(ata_status);
1516 ata_qc_complete(qc);
1517 }
1518
1519 static void mv_intr_edma(struct ata_port *ap)
1520 {
1521 void __iomem *port_mmio = mv_ap_base(ap);
1522 struct mv_host_priv *hpriv = ap->host->private_data;
1523 struct mv_port_priv *pp = ap->private_data;
1524 struct ata_queued_cmd *qc;
1525 u32 out_index, in_index;
1526 bool work_done = false;
1527
1528 /* get h/w response queue pointer */
1529 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1530 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1531
1532 while (1) {
1533 u16 status;
1534 unsigned int tag;
1535
1536 /* get s/w response queue last-read pointer, and compare */
1537 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
1538 if (in_index == out_index)
1539 break;
1540
1541 /* 50xx: get active ATA command */
1542 if (IS_GEN_I(hpriv))
1543 tag = ap->link.active_tag;
1544
1545 /* Gen II/IIE: get active ATA command via tag, to enable
1546 * support for queueing. this works transparently for
1547 * queued and non-queued modes.
1548 */
1549 else if (IS_GEN_II(hpriv))
1550 tag = (le16_to_cpu(pp->crpb[out_index].id)
1551 >> CRPB_IOID_SHIFT_6) & 0x3f;
1552
1553 else /* IS_GEN_IIE */
1554 tag = (le16_to_cpu(pp->crpb[out_index].id)
1555 >> CRPB_IOID_SHIFT_7) & 0x3f;
1556
1557 qc = ata_qc_from_tag(ap, tag);
1558
1559 /* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
1560 * bits (WARNING: might not necessarily be associated
1561 * with this command), which -should- be clear
1562 * if all is well
1563 */
1564 status = le16_to_cpu(pp->crpb[out_index].flags);
1565 if (unlikely(status & 0xff)) {
1566 mv_err_intr(ap, qc);
1567 return;
1568 }
1569
1570 /* and finally, complete the ATA command */
1571 if (qc) {
1572 qc->err_mask |=
1573 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1574 ata_qc_complete(qc);
1575 }
1576
1577 /* advance software response queue pointer, to
1578 * indicate (after the loop completes) to hardware
1579 * that we have consumed a response queue entry.
1580 */
1581 work_done = true;
1582 pp->resp_idx++;
1583 }
1584
1585 if (work_done)
1586 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1587 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1588 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1589 }
1590
1591 /**
1592 * mv_host_intr - Handle all interrupts on the given host controller
1593 * @host: host specific structure
1594 * @relevant: port error bits relevant to this host controller
1595 * @hc: which host controller we're to look at
1596 *
1597 * Read then write clear the HC interrupt status then walk each
1598 * port connected to the HC and see if it needs servicing. Port
1599 * success ints are reported in the HC interrupt status reg, the
1600 * port error ints are reported in the higher level main
1601 * interrupt status register and thus are passed in via the
1602 * 'relevant' argument.
1603 *
1604 * LOCKING:
1605 * Inherited from caller.
1606 */
1607 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1608 {
1609 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1610 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1611 u32 hc_irq_cause;
1612 int port, port0;
1613
1614 if (hc == 0)
1615 port0 = 0;
1616 else
1617 port0 = MV_PORTS_PER_HC;
1618
1619 /* we'll need the HC success int register in most cases */
1620 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
1621 if (!hc_irq_cause)
1622 return;
1623
1624 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1625
1626 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1627 hc,relevant,hc_irq_cause);
1628
1629 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
1630 struct ata_port *ap = host->ports[port];
1631 struct mv_port_priv *pp = ap->private_data;
1632 int have_err_bits, hard_port, shift;
1633
1634 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1635 continue;
1636
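/* The main cause register allots two bits per port (error, done), with
 * one extra coalescing bit (8 or 17) wedged in after each HC's four
 * ports; hence the extra shift below for ports on the second HC.
 */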
1637 shift = port << 1; /* (port * 2) */
1638 if (port >= MV_PORTS_PER_HC) {
1639 shift++; /* skip bit 8 in the HC Main IRQ reg */
1640 }
1641 have_err_bits = ((PORT0_ERR << shift) & relevant);
1642
1643 if (unlikely(have_err_bits)) {
1644 struct ata_queued_cmd *qc;
1645
1646 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1647 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1648 continue;
1649
1650 mv_err_intr(ap, qc);
1651 continue;
1652 }
1653
1654 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1655
1656 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1657 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1658 mv_intr_edma(ap);
1659 } else {
1660 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1661 mv_intr_pio(ap);
1662 }
1663 }
1664 VPRINTK("EXIT\n");
1665 }
1666
1667 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1668 {
1669 struct ata_port *ap;
1670 struct ata_queued_cmd *qc;
1671 struct ata_eh_info *ehi;
1672 unsigned int i, err_mask, printed = 0;
1673 u32 err_cause;
1674
1675 err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);
1676
1677 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1678 err_cause);
1679
1680 DPRINTK("All regs @ PCI error\n");
1681 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1682
1683 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
1684
1685 for (i = 0; i < host->n_ports; i++) {
1686 ap = host->ports[i];
1687 if (!ata_link_offline(&ap->link)) {
1688 ehi = &ap->link.eh_info;
1689 ata_ehi_clear_desc(ehi);
1690 if (!printed++)
1691 ata_ehi_push_desc(ehi,
1692 "PCI err cause 0x%08x", err_cause);
1693 err_mask = AC_ERR_HOST_BUS;
1694 ehi->action = ATA_EH_HARDRESET;
1695 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1696 if (qc)
1697 qc->err_mask |= err_mask;
1698 else
1699 ehi->err_mask |= err_mask;
1700
1701 ata_port_freeze(ap);
1702 }
1703 }
1704 }
1705
1706 /**
1707 * mv_interrupt - Main interrupt event handler
1708 * @irq: unused
1709 * @dev_instance: private data; in this case the host structure
1710 *
1711 * Read the read only register to determine if any host
1712 * controllers have pending interrupts. If so, call lower level
1713 * routine to handle. Also check for PCI errors which are only
1714 * reported here.
1715 *
1716 * LOCKING:
1717 * This routine holds the host lock while processing pending
1718 * interrupts.
1719 */
1720 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1721 {
1722 struct ata_host *host = dev_instance;
1723 unsigned int hc, handled = 0, n_hcs;
1724 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
1725 u32 irq_stat;
1726
1727 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
1728
1729 /* check the cases where we either have nothing pending or have read
1730 * a bogus register value which can indicate HW removal or PCI fault
1731 */
1732 if (!irq_stat || (0xffffffffU == irq_stat))
1733 return IRQ_NONE;
1734
1735 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1736 spin_lock(&host->lock);
1737
1738 if (unlikely(irq_stat & PCI_ERR)) {
1739 mv_pci_error(host, mmio);
1740 handled = 1;
1741 goto out_unlock; /* skip all other HC irq handling */
1742 }
1743
1744 for (hc = 0; hc < n_hcs; hc++) {
1745 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1746 if (relevant) {
1747 mv_host_intr(host, relevant, hc);
1748 handled = 1;
1749 }
1750 }
1751
1752 out_unlock:
1753 spin_unlock(&host->lock);
1754
1755 return IRQ_RETVAL(handled);
1756 }
1757
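/* On Gen-I (50xx) parts the per-port PHY/SCR registers live inside the
 * HC register block rather than in the port's EDMA block: hard port n
 * uses the 256-byte window at HC base + (n + 1) * 0x100, as computed
 * below.
 */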
1758 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1759 {
1760 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1761 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1762
1763 return hc_mmio + ofs;
1764 }
1765
1766 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1767 {
1768 unsigned int ofs;
1769
1770 switch (sc_reg_in) {
1771 case SCR_STATUS:
1772 case SCR_ERROR:
1773 case SCR_CONTROL:
1774 ofs = sc_reg_in * sizeof(u32);
1775 break;
1776 default:
1777 ofs = 0xffffffffU;
1778 break;
1779 }
1780 return ofs;
1781 }
1782
1783 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1784 {
1785 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1786 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1787 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1788
1789 if (ofs != 0xffffffffU) {
1790 *val = readl(addr + ofs);
1791 return 0;
1792 } else
1793 return -EINVAL;
1794 }
1795
1796 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1797 {
1798 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1799 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1800 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1801
1802 if (ofs != 0xffffffffU) {
1803 writelfl(val, addr + ofs);
1804 return 0;
1805 } else
1806 return -EINVAL;
1807 }
1808
1809 static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1810 {
1811 int early_5080;
1812
1813 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1814
1815 if (!early_5080) {
1816 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1817 tmp |= (1 << 0);
1818 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1819 }
1820
1821 mv_reset_pci_bus(pdev, mmio);
1822 }
1823
1824 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1825 {
1826 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1827 }
1828
1829 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1830 void __iomem *mmio)
1831 {
1832 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1833 u32 tmp;
1834
1835 tmp = readl(phy_mmio + MV5_PHY_MODE);
1836
1837 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1838 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
1839 }
1840
1841 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1842 {
1843 u32 tmp;
1844
1845 writel(0, mmio + MV_GPIO_PORT_CTL);
1846
1847 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1848
1849 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1850 tmp |= ~(1 << 0);
1851 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1852 }
1853
1854 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1855 unsigned int port)
1856 {
1857 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1858 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1859 u32 tmp;
1860 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1861
1862 if (fix_apm_sq) {
1863 tmp = readl(phy_mmio + MV5_LT_MODE);
1864 tmp |= (1 << 19);
1865 writel(tmp, phy_mmio + MV5_LT_MODE);
1866
1867 tmp = readl(phy_mmio + MV5_PHY_CTL);
1868 tmp &= ~0x3;
1869 tmp |= 0x1;
1870 writel(tmp, phy_mmio + MV5_PHY_CTL);
1871 }
1872
1873 tmp = readl(phy_mmio + MV5_PHY_MODE);
1874 tmp &= ~mask;
1875 tmp |= hpriv->signal[port].pre;
1876 tmp |= hpriv->signal[port].amps;
1877 writel(tmp, phy_mmio + MV5_PHY_MODE);
1878 }
1879
1880
1881 #undef ZERO
1882 #define ZERO(reg) writel(0, port_mmio + (reg))
1883 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1884 unsigned int port)
1885 {
1886 void __iomem *port_mmio = mv_port_base(mmio, port);
1887
1888 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1889
1890 mv_channel_reset(hpriv, mmio, port);
1891
1892 ZERO(0x028); /* command */
1893 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1894 ZERO(0x004); /* timer */
1895 ZERO(0x008); /* irq err cause */
1896 ZERO(0x00c); /* irq err mask */
1897 ZERO(0x010); /* rq bah */
1898 ZERO(0x014); /* rq inp */
1899 ZERO(0x018); /* rq outp */
1900 ZERO(0x01c); /* respq bah */
1901 ZERO(0x024); /* respq outp */
1902 ZERO(0x020); /* respq inp */
1903 ZERO(0x02c); /* test control */
1904 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1905 }
1906 #undef ZERO
1907
1908 #define ZERO(reg) writel(0, hc_mmio + (reg))
1909 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1910 unsigned int hc)
1911 {
1912 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1913 u32 tmp;
1914
1915 ZERO(0x00c);
1916 ZERO(0x010);
1917 ZERO(0x014);
1918 ZERO(0x018);
1919
1920 tmp = readl(hc_mmio + 0x20);
1921 tmp &= 0x1c1c1c1c;
1922 tmp |= 0x03030303;
1923 writel(tmp, hc_mmio + 0x20);
1924 }
1925 #undef ZERO
1926
1927 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1928 unsigned int n_hc)
1929 {
1930 unsigned int hc, port;
1931
1932 for (hc = 0; hc < n_hc; hc++) {
1933 for (port = 0; port < MV_PORTS_PER_HC; port++)
1934 mv5_reset_hc_port(hpriv, mmio,
1935 (hc * MV_PORTS_PER_HC) + port);
1936
1937 mv5_reset_one_hc(hpriv, mmio, hc);
1938 }
1939
1940 return 0;
1941 }
1942
1943 #undef ZERO
1944 #define ZERO(reg) writel(0, mmio + (reg))
1945 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
1946 {
1947 u32 tmp;
1948
1949 tmp = readl(mmio + MV_PCI_MODE);
1950 tmp &= 0xff00ffff;
1951 writel(tmp, mmio + MV_PCI_MODE);
1952
1953 ZERO(MV_PCI_DISC_TIMER);
1954 ZERO(MV_PCI_MSI_TRIGGER);
1955 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1956 ZERO(HC_MAIN_IRQ_MASK_OFS);
1957 ZERO(MV_PCI_SERR_MASK);
1958 ZERO(PCI_IRQ_CAUSE_OFS);
1959 ZERO(PCI_IRQ_MASK_OFS);
1960 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1961 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1962 ZERO(MV_PCI_ERR_ATTRIBUTE);
1963 ZERO(MV_PCI_ERR_COMMAND);
1964 }
1965 #undef ZERO
1966
1967 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1968 {
1969 u32 tmp;
1970
1971 mv5_reset_flash(hpriv, mmio);
1972
1973 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1974 tmp &= 0x3;
1975 tmp |= (1 << 5) | (1 << 6);
1976 writel(tmp, mmio + MV_GPIO_PORT_CTL);
1977 }
1978
1979 /**
1980 * mv6_reset_hc - Perform the 6xxx global soft reset
1981 * @mmio: base address of the HBA
1982 *
1983 * This routine only applies to 6xxx parts.
1984 *
1985 * LOCKING:
1986 * Inherited from caller.
1987 */
1988 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1989 unsigned int n_hc)
1990 {
1991 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
1992 int i, rc = 0;
1993 u32 t;
1994
1995 /* Following procedure defined in PCI "main command and status
1996 * register" table.
1997 */
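/* In outline: stop the PCI master, wait for its queues to drain,
 * assert the global soft reset bit, then clear it again while
 * re-enabling the master.
 */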
1998 t = readl(reg);
1999 writel(t | STOP_PCI_MASTER, reg);
2000
2001 for (i = 0; i < 1000; i++) {
2002 udelay(1);
2003 t = readl(reg);
2004 if (PCI_MASTER_EMPTY & t) {
2005 break;
2006 }
2007 }
2008 if (!(PCI_MASTER_EMPTY & t)) {
2009 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2010 rc = 1;
2011 goto done;
2012 }
2013
2014 /* set reset */
2015 i = 5;
2016 do {
2017 writel(t | GLOB_SFT_RST, reg);
2018 t = readl(reg);
2019 udelay(1);
2020 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2021
2022 if (!(GLOB_SFT_RST & t)) {
2023 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2024 rc = 1;
2025 goto done;
2026 }
2027
2028 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2029 i = 5;
2030 do {
2031 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2032 t = readl(reg);
2033 udelay(1);
2034 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2035
2036 if (GLOB_SFT_RST & t) {
2037 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2038 rc = 1;
2039 }
2040 done:
2041 return rc;
2042 }
2043
2044 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2045 void __iomem *mmio)
2046 {
2047 void __iomem *port_mmio;
2048 u32 tmp;
2049
2050 tmp = readl(mmio + MV_RESET_CFG);
2051 if ((tmp & (1 << 0)) == 0) {
2052 hpriv->signal[idx].amps = 0x7 << 8;
2053 hpriv->signal[idx].pre = 0x1 << 5;
2054 return;
2055 }
2056
2057 port_mmio = mv_port_base(mmio, idx);
2058 tmp = readl(port_mmio + PHY_MODE2);
2059
2060 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2061 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2062 }
2063
2064 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2065 {
2066 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
2067 }
2068
2069 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2070 unsigned int port)
2071 {
2072 void __iomem *port_mmio = mv_port_base(mmio, port);
2073
2074 u32 hp_flags = hpriv->hp_flags;
2075 int fix_phy_mode2 =
2076 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2077 int fix_phy_mode4 =
2078 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2079 u32 m2, tmp;
2080
2081 if (fix_phy_mode2) {
2082 m2 = readl(port_mmio + PHY_MODE2);
2083 m2 &= ~(1 << 16);
2084 m2 |= (1 << 31);
2085 writel(m2, port_mmio + PHY_MODE2);
2086
2087 udelay(200);
2088
2089 m2 = readl(port_mmio + PHY_MODE2);
2090 m2 &= ~((1 << 16) | (1 << 31));
2091 writel(m2, port_mmio + PHY_MODE2);
2092
2093 udelay(200);
2094 }
2095
2096 /* who knows what this magic does */
2097 tmp = readl(port_mmio + PHY_MODE3);
2098 tmp &= ~0x7F800000;
2099 tmp |= 0x2A800000;
2100 writel(tmp, port_mmio + PHY_MODE3);
2101
2102 if (fix_phy_mode4) {
2103 u32 m4;
2104
2105 m4 = readl(port_mmio + PHY_MODE4);
2106
2107 if (hp_flags & MV_HP_ERRATA_60X1B2)
2108 tmp = readl(port_mmio + 0x310);
2109
2110 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2111
2112 writel(m4, port_mmio + PHY_MODE4);
2113
2114 if (hp_flags & MV_HP_ERRATA_60X1B2)
2115 writel(tmp, port_mmio + 0x310);
2116 }
2117
2118 /* Revert values of pre-emphasis and signal amps to the saved ones */
2119 m2 = readl(port_mmio + PHY_MODE2);
2120
2121 m2 &= ~MV_M2_PREAMP_MASK;
2122 m2 |= hpriv->signal[port].amps;
2123 m2 |= hpriv->signal[port].pre;
2124 m2 &= ~(1 << 16);
2125
2126 /* according to mvSata 3.6.1, some IIE values are fixed */
2127 if (IS_GEN_IIE(hpriv)) {
2128 m2 &= ~0xC30FF01F;
2129 m2 |= 0x0000900F;
2130 }
2131
2132 writel(m2, port_mmio + PHY_MODE2);
2133 }
2134
2135 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2136 unsigned int port_no)
2137 {
2138 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2139
2140 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2141
2142 if (IS_GEN_II(hpriv)) {
2143 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2144 ifctl |= (1 << 7); /* enable gen2i speed */
2145 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2146 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2147 }
2148
2149 udelay(25); /* allow reset propagation */
2150
2151 /* Spec never mentions clearing the bit. Marvell's driver does
2152 * clear the bit, however.
2153 */
2154 writelfl(0, port_mmio + EDMA_CMD_OFS);
2155
2156 hpriv->ops->phy_errata(hpriv, mmio, port_no);
2157
2158 if (IS_GEN_I(hpriv))
2159 mdelay(1);
2160 }
2161
2162 /**
2163 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2164 * @ap: ATA channel to manipulate
2165 *
2166 * Part of this is taken from __sata_phy_reset.  Note that this
2167 * routine calls msleep() and therefore may sleep; it is invoked
2168 * from libata EH (hardreset) context, not from interrupt level.
2169 *
2170 * LOCKING:
2171 * Inherited from caller; may sleep.
2172 */
2173 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2174 unsigned long deadline)
2175 {
2176 struct mv_port_priv *pp = ap->private_data;
2177 struct mv_host_priv *hpriv = ap->host->private_data;
2178 void __iomem *port_mmio = mv_ap_base(ap);
2179 int retry = 5;
2180 u32 sstatus;
2181
2182 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2183
2184 #ifdef DEBUG
2185 {
2186 u32 sstatus, serror, scontrol;
2187
2188 mv_scr_read(ap, SCR_STATUS, &sstatus);
2189 mv_scr_read(ap, SCR_ERROR, &serror);
2190 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2191 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2192 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2193 }
2194 #endif
2195
2196 /* Issue COMRESET via SControl */
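/* SControl 0x301 = DET 0x1 (start interface initialization) with partial and
 * slumber power management disabled; writing 0x300 afterwards releases DET
 * and lets the PHY renegotiate the link.
 */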
2197 comreset_retry:
2198 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2199 msleep(1);
2200
2201 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2202 msleep(20);
2203
2204 do {
2205 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2206 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2207 break;
2208
2209 msleep(1);
2210 } while (time_before(jiffies, deadline));
2211
2212 /* work around errata */
2213 if (IS_GEN_II(hpriv) &&
2214 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2215 (retry-- > 0))
2216 goto comreset_retry;
2217
2218 #ifdef DEBUG
2219 {
2220 u32 sstatus, serror, scontrol;
2221
2222 mv_scr_read(ap, SCR_STATUS, &sstatus);
2223 mv_scr_read(ap, SCR_ERROR, &serror);
2224 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2225 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2226 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2227 }
2228 #endif
2229
2230 if (ata_link_offline(&ap->link)) {
2231 *class = ATA_DEV_NONE;
2232 return;
2233 }
2234
2235 /* Even after SStatus reflects that the device is ready,
2236 * it seems to take a while for the link to be fully
2237 * established (and thus Status is no longer 0x80/0x7F),
2238 * so we poll a bit for that here.
2239 */
2240 retry = 20;
2241 while (1) {
2242 u8 drv_stat = ata_check_status(ap);
2243 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2244 break;
2245 msleep(500);
2246 if (retry-- <= 0)
2247 break;
2248 if (time_after(jiffies, deadline))
2249 break;
2250 }
2251
2252 /* FIXME: if we passed the deadline, the following
2253 * code probably produces an invalid result
2254 */
2255
2256 /* finally, read device signature from TF registers */
2257 *class = ata_dev_try_classify(ap, 0, NULL);
2258
2259 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2260
2261 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2262
2263 VPRINTK("EXIT\n");
2264 }
2265
2266 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2267 {
2268 struct ata_port *ap = link->ap;
2269 struct mv_port_priv *pp = ap->private_data;
2270 struct ata_eh_context *ehc = &link->eh_context;
2271 int rc;
2272
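/* A failed mv_stop_dma() or the first pass through EH on this port
 * (MV_PP_FLAG_HAD_A_RESET not yet set) escalates straight to hardreset.
 */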
2273 rc = mv_stop_dma(ap);
2274 if (rc)
2275 ehc->i.action |= ATA_EH_HARDRESET;
2276
2277 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2278 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2279 ehc->i.action |= ATA_EH_HARDRESET;
2280 }
2281
2282 /* if we're about to do hardreset, nothing more to do */
2283 if (ehc->i.action & ATA_EH_HARDRESET)
2284 return 0;
2285
2286 if (ata_link_online(link))
2287 rc = ata_wait_ready(ap, deadline);
2288 else
2289 rc = -ENODEV;
2290
2291 return rc;
2292 }
2293
2294 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2295 unsigned long deadline)
2296 {
2297 struct ata_port *ap = link->ap;
2298 struct mv_host_priv *hpriv = ap->host->private_data;
2299 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2300
2301 mv_stop_dma(ap);
2302
2303 mv_channel_reset(hpriv, mmio, ap->port_no);
2304
2305 mv_phy_reset(ap, class, deadline);
2306
2307 return 0;
2308 }
2309
2310 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2311 {
2312 struct ata_port *ap = link->ap;
2313 u32 serr;
2314
2315 /* print link status */
2316 sata_print_link_status(link);
2317
2318 /* clear SError */
2319 sata_scr_read(link, SCR_ERROR, &serr);
2320 sata_scr_write_flush(link, SCR_ERROR, serr);
2321
2322 /* bail out if no device is present */
2323 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2324 DPRINTK("EXIT, no device\n");
2325 return;
2326 }
2327
2328 /* set up device control */
2329 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2330 }
2331
2332 static void mv_error_handler(struct ata_port *ap)
2333 {
2334 ata_do_eh(ap, mv_prereset, ata_std_softreset,
2335 mv_hardreset, mv_postreset);
2336 }
2337
2338 static void mv_post_int_cmd(struct ata_queued_cmd *qc)
2339 {
2340 mv_stop_dma(qc->ap);
2341 }
2342
2343 static void mv_eh_freeze(struct ata_port *ap)
2344 {
2345 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2346 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2347 u32 tmp, mask;
2348 unsigned int shift;
2349
2350 /* FIXME: handle coalescing completion events properly */
2351
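/* Each port owns a two-bit err/done field in the main IRQ mask; ports behind
 * the second host controller sit one extra bit higher (mv_eh_thaw() below
 * uses the same layout).
 */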
2352 shift = ap->port_no * 2;
2353 if (hc > 0)
2354 shift++;
2355
2356 mask = 0x3 << shift;
2357
2358 /* disable assertion of portN err, done events */
2359 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2360 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2361 }
2362
2363 static void mv_eh_thaw(struct ata_port *ap)
2364 {
2365 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2366 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2367 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2368 void __iomem *port_mmio = mv_ap_base(ap);
2369 u32 tmp, mask, hc_irq_cause;
2370 unsigned int shift, hc_port_no = ap->port_no;
2371
2372 /* FIXME: handle coalescing completion events properly */
2373
2374 shift = ap->port_no * 2;
2375 if (hc > 0) {
2376 shift++;
2377 hc_port_no -= 4;
2378 }
2379
2380 mask = 0x3 << shift;
2381
2382 /* clear EDMA errors on this port */
2383 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2384
2385 /* clear pending irq events */
2386 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2387 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2388 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2389 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2390
2391 /* enable assertion of portN err, done events */
2392 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2393 writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2394 }
2395
2396 /**
2397 * mv_port_init - Perform some early initialization on a single port.
2398 * @port: libata data structure storing shadow register addresses
2399 * @port_mmio: base address of the port
2400 *
2401 * Initialize shadow register mmio addresses, clear outstanding
2402 * interrupts on the port, and unmask interrupts for the future
2403 * start of the port.
2404 *
2405 * LOCKING:
2406 * Inherited from caller.
2407 */
2408 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2409 {
2410 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2411 unsigned serr_ofs;
2412
2413 /* PIO related setup
2414 */
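/* The shadow taskfile registers are exposed as 32-bit-spaced copies inside
 * the port window, hence the sizeof(u32) stride below.
 */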
2415 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2416 port->error_addr =
2417 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2418 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2419 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2420 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2421 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2422 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2423 port->status_addr =
2424 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2425 /* special case: control/altstatus doesn't have ATA_REG_ address */
2426 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2427
2428 /* unused: */
2429 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2430
2431 /* Clear any currently outstanding port interrupt conditions */
2432 serr_ofs = mv_scr_offset(SCR_ERROR);
2433 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2434 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2435
2436 /* unmask all EDMA error interrupts */
2437 writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2438
2439 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2440 readl(port_mmio + EDMA_CFG_OFS),
2441 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2442 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2443 }
2444
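/* Select the per-generation ops vtable and errata flags for this board,
 * keyed on the board index from the PCI ID table and the PCI revision ID.
 */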
2445 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2446 {
2447 struct pci_dev *pdev = to_pci_dev(host->dev);
2448 struct mv_host_priv *hpriv = host->private_data;
2449 u32 hp_flags = hpriv->hp_flags;
2450
2451 switch (board_idx) {
2452 case chip_5080:
2453 hpriv->ops = &mv5xxx_ops;
2454 hp_flags |= MV_HP_GEN_I;
2455
2456 switch (pdev->revision) {
2457 case 0x1:
2458 hp_flags |= MV_HP_ERRATA_50XXB0;
2459 break;
2460 case 0x3:
2461 hp_flags |= MV_HP_ERRATA_50XXB2;
2462 break;
2463 default:
2464 dev_printk(KERN_WARNING, &pdev->dev,
2465 "Applying 50XXB2 workarounds to unknown rev\n");
2466 hp_flags |= MV_HP_ERRATA_50XXB2;
2467 break;
2468 }
2469 break;
2470
2471 case chip_504x:
2472 case chip_508x:
2473 hpriv->ops = &mv5xxx_ops;
2474 hp_flags |= MV_HP_GEN_I;
2475
2476 switch (pdev->revision) {
2477 case 0x0:
2478 hp_flags |= MV_HP_ERRATA_50XXB0;
2479 break;
2480 case 0x3:
2481 hp_flags |= MV_HP_ERRATA_50XXB2;
2482 break;
2483 default:
2484 dev_printk(KERN_WARNING, &pdev->dev,
2485 "Applying B2 workarounds to unknown rev\n");
2486 hp_flags |= MV_HP_ERRATA_50XXB2;
2487 break;
2488 }
2489 break;
2490
2491 case chip_604x:
2492 case chip_608x:
2493 hpriv->ops = &mv6xxx_ops;
2494 hp_flags |= MV_HP_GEN_II;
2495
2496 switch (pdev->revision) {
2497 case 0x7:
2498 hp_flags |= MV_HP_ERRATA_60X1B2;
2499 break;
2500 case 0x9:
2501 hp_flags |= MV_HP_ERRATA_60X1C0;
2502 break;
2503 default:
2504 dev_printk(KERN_WARNING, &pdev->dev,
2505 "Applying B2 workarounds to unknown rev\n");
2506 hp_flags |= MV_HP_ERRATA_60X1B2;
2507 break;
2508 }
2509 break;
2510
2511 case chip_7042:
2512 case chip_6042:
2513 hpriv->ops = &mv6xxx_ops;
2514 hp_flags |= MV_HP_GEN_IIE;
2515
2516 switch (pdev->revision) {
2517 case 0x0:
2518 hp_flags |= MV_HP_ERRATA_XX42A0;
2519 break;
2520 case 0x1:
2521 hp_flags |= MV_HP_ERRATA_60X1C0;
2522 break;
2523 default:
2524 dev_printk(KERN_WARNING, &pdev->dev,
2525 "Applying 60X1C0 workarounds to unknown rev\n");
2526 hp_flags |= MV_HP_ERRATA_60X1C0;
2527 break;
2528 }
2529 break;
2530
2531 default:
2532 printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
2533 return 1;
2534 }
2535
2536 hpriv->hp_flags = hp_flags;
2537
2538 return 0;
2539 }
2540
2541 /**
2542 * mv_init_host - Perform some early initialization of the host.
2543 * @host: ATA host to initialize
2544 * @board_idx: controller index
2545 *
2546 * If possible, do an early global reset of the host. Then do
2547 * our port init and clear/unmask all/relevant host interrupts.
2548 *
2549 * LOCKING:
2550 * Inherited from caller.
2551 */
2552 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2553 {
2554 int rc = 0, n_hc, port, hc;
2555 struct pci_dev *pdev = to_pci_dev(host->dev);
2556 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
2557 struct mv_host_priv *hpriv = host->private_data;
2558
2559 /* global interrupt mask */
2560 writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
2561
2562 rc = mv_chip_id(host, board_idx);
2563 if (rc)
2564 goto done;
2565
2566 n_hc = mv_get_hc_count(host->ports[0]->flags);
2567
2568 for (port = 0; port < host->n_ports; port++)
2569 hpriv->ops->read_preamp(hpriv, port, mmio);
2570
2571 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2572 if (rc)
2573 goto done;
2574
2575 hpriv->ops->reset_flash(hpriv, mmio);
2576 hpriv->ops->reset_bus(pdev, mmio);
2577 hpriv->ops->enable_leds(hpriv, mmio);
2578
2579 for (port = 0; port < host->n_ports; port++) {
2580 if (IS_GEN_II(hpriv)) {
2581 void __iomem *port_mmio = mv_port_base(mmio, port);
2582
2583 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2584 ifctl |= (1 << 7); /* enable gen2i speed */
2585 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2586 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2587 }
2588
2589 hpriv->ops->phy_errata(hpriv, mmio, port);
2590 }
2591
2592 for (port = 0; port < host->n_ports; port++) {
2593 void __iomem *port_mmio = mv_port_base(mmio, port);
2594 mv_port_init(&host->ports[port]->ioaddr, port_mmio);
2595 }
2596
2597 for (hc = 0; hc < n_hc; hc++) {
2598 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2599
2600 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2601 "(before clear)=0x%08x\n", hc,
2602 readl(hc_mmio + HC_CFG_OFS),
2603 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2604
2605 /* Clear any currently outstanding hc interrupt conditions */
2606 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2607 }
2608
2609 /* Clear any currently outstanding host interrupt conditions */
2610 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
2611
2612 /* and unmask interrupt generation for host regs */
2613 writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
2614
2615 if (IS_GEN_I(hpriv))
2616 writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
2617 else
2618 writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
2619
2620 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2621 "PCI int cause/mask=0x%08x/0x%08x\n",
2622 readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
2623 readl(mmio + HC_MAIN_IRQ_MASK_OFS),
2624 readl(mmio + PCI_IRQ_CAUSE_OFS),
2625 readl(mmio + PCI_IRQ_MASK_OFS));
2626
2627 done:
2628 return rc;
2629 }
2630
2631 /**
2632 * mv_print_info - Dump key info to kernel log for perusal.
2633 * @host: ATA host to print info about
2634 *
2635 * FIXME: complete this.
2636 *
2637 * LOCKING:
2638 * Inherited from caller.
2639 */
2640 static void mv_print_info(struct ata_host *host)
2641 {
2642 struct pci_dev *pdev = to_pci_dev(host->dev);
2643 struct mv_host_priv *hpriv = host->private_data;
2644 u8 scc;
2645 const char *scc_s, *gen;
2646
2647 /* Read the PCI subclass byte so we can report whether the
2648 * controller presents itself in SCSI or RAID mode
2649 */
2650 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2651 if (scc == 0)
2652 scc_s = "SCSI";
2653 else if (scc == 0x01)
2654 scc_s = "RAID";
2655 else
2656 scc_s = "?";
2657
2658 if (IS_GEN_I(hpriv))
2659 gen = "I";
2660 else if (IS_GEN_II(hpriv))
2661 gen = "II";
2662 else if (IS_GEN_IIE(hpriv))
2663 gen = "IIE";
2664 else
2665 gen = "?";
2666
2667 dev_printk(KERN_INFO, &pdev->dev,
2668 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2669 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2670 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2671 }
2672
2673 /**
2674 * mv_init_one - handle a positive probe of a Marvell host
2675 * @pdev: PCI device found
2676 * @ent: PCI device ID entry for the matched host
2677 *
2678 * LOCKING:
2679 * Inherited from caller.
2680 */
2681 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2682 {
2683 static int printed_version = 0;
2684 unsigned int board_idx = (unsigned int)ent->driver_data;
2685 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2686 struct ata_host *host;
2687 struct mv_host_priv *hpriv;
2688 int n_ports, rc;
2689
2690 if (!printed_version++)
2691 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2692
2693 /* allocate host */
2694 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2695
2696 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2697 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2698 if (!host || !hpriv)
2699 return -ENOMEM;
2700 host->private_data = hpriv;
2701
2702 /* acquire resources */
2703 rc = pcim_enable_device(pdev);
2704 if (rc)
2705 return rc;
2706
2707 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2708 if (rc == -EBUSY)
2709 pcim_pin_device(pdev);
2710 if (rc)
2711 return rc;
2712 host->iomap = pcim_iomap_table(pdev);
2713
2714 rc = pci_go_64(pdev);
2715 if (rc)
2716 return rc;
2717
2718 /* initialize adapter */
2719 rc = mv_init_host(host, board_idx);
2720 if (rc)
2721 return rc;
2722
2723 /* Enable interrupts */
2724 if (msi && pci_enable_msi(pdev))
2725 pci_intx(pdev, 1);
2726
2727 mv_dump_pci_cfg(pdev, 0x68);
2728 mv_print_info(host);
2729
2730 pci_set_master(pdev);
2731 pci_try_set_mwi(pdev);
2732 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2733 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2734 }
2735
2736 static int __init mv_init(void)
2737 {
2738 return pci_register_driver(&mv_pci_driver);
2739 }
2740
2741 static void __exit mv_exit(void)
2742 {
2743 pci_unregister_driver(&mv_pci_driver);
2744 }
2745
2746 MODULE_AUTHOR("Brett Russ");
2747 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2748 MODULE_LICENSE("GPL");
2749 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2750 MODULE_VERSION(DRV_VERSION);
2751
2752 module_param(msi, int, 0444);
2753 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2754
2755 module_init(mv_init);
2756 module_exit(mv_exit);