/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * sata_mv TODO list:
 *
 * 1) Needs a full errata audit for all chipsets.  I implemented most
 *    of the errata workarounds found in the Marvell vendor driver, but
 *    I distinctly remember a couple workarounds (one related to PCI-X)
 *    are still needed.
 *
 * 2) Improve/fix IRQ and error handling sequences.
 *
 * 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * 4) Think about TCQ support here, and for libata in general with
 *    controllers that support it via host-queuing hardware
 *    (a software-only implementation could be a nightmare).
 *
 * 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
 *
 * 6) Add port multiplier support (intermediate)
 *
 * 8) Develop a low-power-consumption strategy, and implement it.
 *
 * 9) [Experiment, low priority] See if ATAPI can be supported using
 *    "unknown FIS" or "vendor-specific FIS" support, or something creative
 *    like that.
 *
 * 10) [Experiment, low priority] Investigate interrupt coalescing.
 *     Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *     the overhead reduced by interrupt mitigation is not worth the
 *     latency cost.
 *
 * 11) [Experiment, Marvell value added] Is it possible to use target
 *     mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *     creating LibATA target mode support would be very interesting.
 *
 *     Target mode, for those without docs, is the ability to directly
 *     connect two SATA controllers.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

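	/* MV_MAX_Q_DEPTH is a power of two, so ANDing a free-running
	 * index with MV_MAX_Q_DEPTH_MASK is a cheap "index % 32"; the
	 * req_idx/resp_idx producer/consumer counters below rely on
	 * this wrap-around behaviour.
	 */
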
	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

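	/* Sanity check on the sizes above: 32 CRQBs x 32B == 1KB,
	 * 32 CRPBs x 8B == 256B, and 256 ePRDs x 16B == 4KB, matching
	 * the per-queue sizes implied by the comment above.
	 */
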
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= ata_noop_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.dev_config		= mv6_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= ata_noop_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= ata_noop_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

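/* A worked example of the address arithmetic above, assuming the
 * register layout given in the enums: port 5 lives on HC 1 (5 >> 2),
 * hard port 1 (5 & 3), so mv_port_base() resolves to
 * base + 0x20000 + 0x10000 + 0x2000 + 1 * 0x2000 = base + 0x34000.
 */
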
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

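/* The "(dma >> 16) >> 16" idiom above extracts the upper 32 bits of a
 * queue address without performing an undefined 32-bit-wide shift when
 * dma_addr_t is only 32 bits (the result is then simply zero).
 */
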
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 *      __mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

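/* SCR_STATUS (0), SCR_ERROR (1) and SCR_CONTROL (2) map onto
 * consecutive registers starting at SATA_STATUS_OFS (0x300), hence
 * the "sc_reg_in * sizeof(u32)" scaling above; only SCR_ACTIVE lives
 * apart, at 0x350.
 */
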
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}

static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}

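/* Example of the 64KB split above: a 0x4000-byte segment at DMA
 * address 0xf000 becomes two ePRDs, 0x1000 bytes @ 0xf000 followed by
 * 0x3000 bytes @ 0x10000, so that no entry crosses a 64KB boundary.
 * This worst-case doubling is why the scsi_host_templates advertise
 * only MV_MAX_SG_CT / 2 entries.
 */
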
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

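/* Layout of the halfword built above: bits 7:0 carry the register
 * value, the shadow register address sits at CRQB_CMD_ADDR_SHIFT (8),
 * CRQB_CMD_CS (0x2 << 11) supplies fixed control bits, and
 * CRQB_CMD_LAST (bit 15) marks the final word of the CRQB.
 */
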
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

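/* Note on the queue kick above: while EDMA is running, software only
 * ever advances the request queue IN (producer) pointer; the OUT
 * pointer is advanced by the hardware as it consumes CRQBs, and the
 * IN-pointer write is what starts processing of the new request.
 */
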
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected command (may be NULL)
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

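/* Producer/consumer handshake in mv_intr_edma(): hardware advances
 * the response queue IN pointer as CRPBs arrive; the loop consumes
 * entries until the cached out_index catches up, and only then is the
 * OUT pointer written back (once) to return the slots to hardware.
 */
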
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	return;
}
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}
#undef ZERO
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
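	/* Per the SATA SControl definition: writing DET=1 (0x301) forces
	 * the interface into the COMRESET state, and writing DET=0
	 * (0x300) releases it so the PHY can renegotiate; the 0x300 in
	 * both values keeps the IPM field at "partial/slumber disabled"
	 * throughout.
	 */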
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;

		msleep(500);

		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp	= ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;

	return 0;
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
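/*
 * The freeze/thaw handlers below mask or unmask a port's "err" and
 * "done" bits in the chip's main IRQ mask register.  Each port owns a
 * two-bit pair; ports on the second SATAHC sit one bit higher because
 * a per-HC coalescing bit is interleaved into the layout (our reading
 * of the register map, which Marvell has not published).
 */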
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
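	/* Each shadow (taskfile) register occupies its own 32-bit word
	 * in the SHD block, hence the sizeof(u32) stride below.
	 */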
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;
	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;
	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
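/*
 * The pools below use dmam_pool_create(), the device-managed variant,
 * so they are torn down automatically when the device goes away; no
 * explicit dma_pool_destroy() is needed on the error or remove paths.
 */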
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
							     MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
							     MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
							     MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
/**
 *      mv_platform_probe - handle a positive probe of an SoC Marvell
 *      host
 *      @pdev: platform device found
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
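	/* The platform resource points at the SATAHC0 register block,
	 * while the offset helpers (mv_port_base() and friends) expect
	 * the chip's register base, so bias the mapping down by
	 * MV_SATAHC0_REG_BASE to keep the arithmetic uniform with the
	 * PCI case.
	 */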
	hpriv->base -= MV_SATAHC0_REG_BASE;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}
/**
 *      mv_platform_remove - unplug a platform interface
 *      @pdev: platform device
 *
 *      A platform bus SATA device has been unplugged. Perform the needed
 *      cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */
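/*
 * DMA mask setup: prefer fully 64-bit DMA, degrade only the coherent
 * mask to 32-bit if the platform rejects it, and fall back to 32-bit
 * for everything as the last resort.
 */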
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return 0;
}
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use the PCI class code to report whether the chip is
	 * configured in SCSI or RAID mode.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
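	/* If the user asked for MSI but pci_enable_msi() fails, make
	 * sure legacy INTx is re-enabled, since MSI probing may have
	 * left it disabled.
	 */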
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif

	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sata_mv");

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);