/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,
	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC	= (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */
	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,
	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
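/*
 * Illustrative note (added; not from the original source): the BASE_LO
 * masks above also encode the ring alignment requirements.  A mask of
 * 0xfffffc00 leaves the low 10 bits of the request queue base zero,
 * i.e. the CRQB ring must be 1KB aligned; 0xffffff00 likewise forces
 * 256B alignment on the CRPB ring, matching the alignment comment in
 * the first enum block.
 */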
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	u32			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.change_queue_depth	= ata_scsi_change_queue_depth,
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= ata_noop_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv6_ops = {
	.dev_config		= mv6_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= ata_noop_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= ata_noop_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
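/*
 * Worked example (added; illustrative only): for port 6,
 * mv_hc_from_port() yields 6 >> 2 = 1 (second host controller) and
 * mv_hardport_from_port() yields 6 & 3 = 2 (third port on that HC).
 */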
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
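/*
 * Worked example (added; illustrative only): for port 5 this computes
 * base + 0x20000 (SATAHC0) + 1 * 0x10000 (HC1) + 0x2000 (arbiter)
 *      + 1 * 0x2000 (hard port 1) = base + 0x34000.
 */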
static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
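/*
 * Note (added; illustrative only): the "(dma >> 16) >> 16" idiom above
 * extracts the high 32 bits of a dma_addr_t without shifting by 32 in
 * a single step, which would be undefined behavior (and a compiler
 * warning) on configurations where dma_addr_t is only 32 bits wide.
 */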
/**
 *	mv_start_dma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
				(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
/**
 *	__mv_stop_dma - Disable eDMA engine
 *	@ap: ATA channel to manipulate
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	WARN_ON.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
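/*
 * Worked mapping (added; illustrative, using the libata SCR_*
 * numbering): SCR_STATUS (0) -> 0x300, SCR_ERROR (1) -> 0x304,
 * SCR_CONTROL (2) -> 0x308, while SCR_ACTIVE maps to 0x350.
 */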
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag]     = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
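/*
 * Note (added; an assumption, since the pool setup is outside this
 * excerpt): the crqb, crpb and sg_tbl dma_pools are presumably created
 * elsewhere with the 1KB/256B/16B alignments listed in the enum block
 * above; the WARN_ON(pp->crqb_dma & 0x3ff) and
 * WARN_ON(pp->crpb_dma & 0xff) checks in mv_set_edma_ptrs() rely on
 * exactly that guarantee.
 */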
/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
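/*
 * Worked example (added; illustrative only): an SG entry with
 * addr = 0x1fff0 and sg_len = 0x20 has offset = 0xfff0, so
 * offset + sg_len (0x10010) crosses a 64KB boundary.  The loop emits
 * one ePRD of len 0x10000 - 0xfff0 = 0x10 at 0x1fff0, then a second
 * ePRD of 0x10 at 0x20000.
 */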
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
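/*
 * Bit layout of the packed command word, as implied by the CRQB_CMD_*
 * constants above (added; illustrative only):
 *   bits 7:0    register data
 *   bits 10:8   register address
 *   bits 12:11  control field (CRQB_CMD_CS = 0x2)
 *   bit  15     last-command-word flag
 */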
/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
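/*
 * Note (added; illustrative only): pp->req_idx is a free-running
 * counter; only its low 5 bits, shifted into the pointer field by
 * EDMA_REQ_Q_PTR_SHIFT, ever reach the hardware, so the 32-entry
 * request ring wraps naturally via MV_MAX_Q_DEPTH_MASK.
 */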
/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *	@reset_allowed: bool: 0 == don't trigger from reset here
 *
 *	In most cases, just clear the interrupt and move on.  However,
 *	some cases require an eDMA reset, which is done right before
 *	the COMRESET in mv_phy_reset().  The SERR case requires a
 *	clear of pending errors in the SATA SERROR register.  Finally,
 *	if the port disabled DMA, update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;
		else {
			/* Gen II/IIE: get active ATA command via tag, to enable
			 * support for queueing.  this works transparently for
			 * queued and non-queued modes.
			 */
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
		}

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
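/*
 * Note (added; illustrative only): the response-queue OUT pointer is
 * written back once, after the loop, so a burst of completions costs a
 * single MMIO write instead of one per consumed CRPB.
 */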
/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
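/*
 * Worked example (added; illustrative only): with HC0_IRQ_PEND = 0x1ff
 * and HC_SHIFT = 9, hc = 0 selects main-cause bits 0-8 and hc = 1
 * selects bits 9-17, slicing each host controller's ports out of the
 * shared main interrupt cause register.
 */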
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
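/*
 * Worked example (added; illustrative only): for port 2 (hard port 2
 * on HC0) the PHY block lives at hc_mmio + (2 + 1) * 0x100 =
 * hc_mmio + 0x300.
 */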
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@mmio: base address of the HBA
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
                               void __iomem *mmio)
{
        return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
                               void __iomem *mmio)
{
        void __iomem *port_mmio;
        u32 tmp;

        port_mmio = mv_port_base(mmio, idx);
        tmp = readl(port_mmio + PHY_MODE2);

        hpriv->signal[idx].amps = tmp & 0x700;  /* bits 10:8 */
        hpriv->signal[idx].pre = tmp & 0xe0;    /* bits 7:5 */
}

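/*
 * The chip_soc variants are embedded controllers with no PCI interface
 * and a single host controller, so the reset helpers below touch only
 * the per-port EDMA blocks and SATAHC 0.
 */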
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
                                 void __iomem *mmio, unsigned int port)
{
        void __iomem *port_mmio = mv_port_base(mmio, port);

        writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

        mv_channel_reset(hpriv, mmio, port);

        ZERO(0x028);            /* command */
        writel(0x101f, port_mmio + EDMA_CFG_OFS);
        ZERO(0x004);            /* timer */
        ZERO(0x008);            /* irq err cause */
        ZERO(0x00c);            /* irq err mask */
        ZERO(0x010);            /* rq bah */
        ZERO(0x014);            /* rq inp */
        ZERO(0x018);            /* rq outp */
        ZERO(0x01c);            /* respq bah */
        ZERO(0x024);            /* respq outp */
        ZERO(0x020);            /* respq inp */
        ZERO(0x02c);            /* test control */
        writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
                                void __iomem *mmio)
{
        void __iomem *hc_mmio = mv_hc_base(mmio, 0);

        ZERO(0x00c);
        ZERO(0x010);
        ZERO(0x014);
}
#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
                           void __iomem *mmio, unsigned int n_hc)
{
        unsigned int port;

        for (port = 0; port < hpriv->n_ports; port++)
                mv_soc_reset_hc_port(hpriv, mmio, port);

        mv_soc_reset_one_hc(hpriv, mmio);

        return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
                               void __iomem *mmio)
{
        return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
        return;
}

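/*
 * mv_channel_reset - assert ATA_RST on one port's EDMA engine, wait for
 * it to propagate, then clear it again and re-apply the per-chip PHY
 * errata fixes.  Gen-II parts additionally need the SATA interface
 * control register reprogrammed while the channel is in reset.
 */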
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port_no)
{
        void __iomem *port_mmio = mv_port_base(mmio, port_no);

        writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

        if (IS_GEN_II(hpriv)) {
                u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
                ifctl |= (1 << 7);              /* enable gen2i speed */
                ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
                writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
        }

        udelay(25);             /* allow reset propagation */

        /* Spec never mentions clearing the bit.  Marvell's driver does
         * clear the bit, however.
         */
        writelfl(0, port_mmio + EDMA_CMD_OFS);

        hpriv->ops->phy_errata(hpriv, mmio, port_no);

        if (IS_GEN_I(hpriv))
                mdelay(1);
}

/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
                         unsigned long deadline)
{
        struct mv_port_priv *pp = ap->private_data;
        struct mv_host_priv *hpriv = ap->host->private_data;
        void __iomem *port_mmio = mv_ap_base(ap);
        int retry = 5;
        u32 sstatus;

        VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
        {
                u32 sstatus, serror, scontrol;

                mv_scr_read(ap, SCR_STATUS, &sstatus);
                mv_scr_read(ap, SCR_ERROR, &serror);
                mv_scr_read(ap, SCR_CONTROL, &scontrol);
                DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
                        "SCtrl 0x%08x\n", sstatus, serror, scontrol);
        }
#endif

        /* Issue COMRESET via SControl */
comreset_retry:
        sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
        msleep(1);

        sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
        msleep(20);

        do {
                sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
                if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
                        break;

                msleep(1);
        } while (time_before(jiffies, deadline));

        /* work around errata */
        if (IS_GEN_II(hpriv) &&
            (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
            (retry-- > 0))
                goto comreset_retry;

#ifdef DEBUG
        {
                u32 sstatus, serror, scontrol;

                mv_scr_read(ap, SCR_STATUS, &sstatus);
                mv_scr_read(ap, SCR_ERROR, &serror);
                mv_scr_read(ap, SCR_CONTROL, &scontrol);
                DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
                        "SCtrl 0x%08x\n", sstatus, serror, scontrol);
        }
#endif

        if (ata_link_offline(&ap->link)) {
                *class = ATA_DEV_NONE;
                return;
        }

        /* even after SStatus reflects that device is ready,
         * it seems to take a while for link to be fully
         * established (and thus Status no longer 0x80/0x7F),
         * so we poll a bit for that, here.
         */
        for (;;) {
                u8 drv_stat = ata_check_status(ap);

                if ((drv_stat != 0x80) && (drv_stat != 0x7f))
                        break;

                msleep(500);

                if (time_after(jiffies, deadline))
                        break;
        }

        /* FIXME: if we passed the deadline, the following
         * code probably produces an invalid result
         */

        /* finally, read device signature from TF registers */
        *class = ata_dev_try_classify(ap->link.device, 1, NULL);

        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
}

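/*
 * libata error-handling callbacks.  mv_prereset() requests a hardreset
 * on a port's first EH pass; mv_hardreset() then resets the channel and
 * PHY, and mv_postreset() clears SError and restores device control
 * once classification is done.
 */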
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct mv_port_priv *pp = ap->private_data;
        struct ata_eh_context *ehc = &link->eh_context;
        int rc;

        rc = mv_stop_dma(ap);
        if (rc)
                ehc->i.action |= ATA_EH_HARDRESET;

        if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
                pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
                ehc->i.action |= ATA_EH_HARDRESET;
        }

        /* if we're about to do hardreset, nothing more to do */
        if (ehc->i.action & ATA_EH_HARDRESET)
                return 0;

        if (ata_link_online(link))
                rc = ata_wait_ready(ap, deadline);
        else
                rc = -ENODEV;

        return rc;
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline)
{
        struct ata_port *ap = link->ap;
        struct mv_host_priv *hpriv = ap->host->private_data;
        void __iomem *mmio = hpriv->base;

        mv_stop_dma(ap);

        mv_channel_reset(hpriv, mmio, ap->port_no);

        mv_phy_reset(ap, class, deadline);

        return 0;
}

static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
        struct ata_port *ap = link->ap;
        u32 serr;

        /* print link status */
        sata_print_link_status(link);

        /* clear SError */
        sata_scr_read(link, SCR_ERROR, &serr);
        sata_scr_write_flush(link, SCR_ERROR, serr);

        /* bail out if no device is present */
        if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
                DPRINTK("EXIT, no device\n");
                return;
        }

        /* set up device control */
        iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}

static void mv_error_handler(struct ata_port *ap)
{
        ata_do_eh(ap, mv_prereset, ata_std_softreset,
                  mv_hardreset, mv_postreset);
}

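/*
 * mv_eh_freeze/mv_eh_thaw gate the two per-port bits (error and done)
 * in the chip's main IRQ mask register.  Ports 0-3 live on SATAHC 0 and
 * ports 4-7 on SATAHC 1, whose bit field starts one position higher,
 * hence the extra shift when hc > 0.
 */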
static void mv_eh_freeze(struct ata_port *ap)
{
        struct mv_host_priv *hpriv = ap->host->private_data;
        unsigned int hc = (ap->port_no > 3) ? 1 : 0;
        u32 tmp, mask;
        unsigned int shift;

        /* FIXME: handle coalescing completion events properly */

        shift = ap->port_no * 2;
        if (hc > 0)
                shift++;

        mask = 0x3 << shift;

        /* disable assertion of portN err, done events */
        tmp = readl(hpriv->main_mask_reg_addr);
        writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}

static void mv_eh_thaw(struct ata_port *ap)
{
        struct mv_host_priv *hpriv = ap->host->private_data;
        void __iomem *mmio = hpriv->base;
        unsigned int hc = (ap->port_no > 3) ? 1 : 0;
        void __iomem *hc_mmio = mv_hc_base(mmio, hc);
        void __iomem *port_mmio = mv_ap_base(ap);
        u32 tmp, mask, hc_irq_cause;
        unsigned int shift, hc_port_no = ap->port_no;

        /* FIXME: handle coalescing completion events properly */

        shift = ap->port_no * 2;
        if (hc > 0) {
                shift++;
                hc_port_no -= 4;
        }

        mask = 0x3 << shift;

        /* clear EDMA errors on this port */
        writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        /* clear pending irq events */
        hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
        hc_irq_cause &= ~(1 << hc_port_no);     /* clear CRPB-done */
        hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
        writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

        /* enable assertion of portN err, done events */
        tmp = readl(hpriv->main_mask_reg_addr);
        writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
        void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
        unsigned serr_ofs;

        /* PIO related setup
         */
        port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
        port->error_addr =
                port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
        port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
        port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
        port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
        port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
        port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
        port->status_addr =
                port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
        /* special case: control/altstatus doesn't have ATA_REG_ address */
        port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

        /* unused: */
        port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

        /* Clear any currently outstanding port interrupt conditions */
        serr_ofs = mv_scr_offset(SCR_ERROR);
        writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

        /* unmask all non-transient EDMA error interrupts */
        writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

        VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
                readl(port_mmio + EDMA_CFG_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
                readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

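/*
 * mv_chip_id - per-variant setup: select the mv5xxx/mv6xxx/soc operation
 * vector and translate the PCI revision ID into MV_HP_ERRATA_* flags.
 * Unknown revisions get the newest known workaround set, with a warning.
 */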
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        struct mv_host_priv *hpriv = host->private_data;
        u32 hp_flags = hpriv->hp_flags;

        switch (board_idx) {
        case chip_5080:
                hpriv->ops = &mv5xxx_ops;
                hp_flags |= MV_HP_GEN_I;

                switch (pdev->revision) {
                case 0x1:
                        hp_flags |= MV_HP_ERRATA_50XXB0;
                        break;
                case 0x3:
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                           "Applying 50XXB2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                }
                break;

        case chip_504x:
        case chip_508x:
                hpriv->ops = &mv5xxx_ops;
                hp_flags |= MV_HP_GEN_I;

                switch (pdev->revision) {
                case 0x0:
                        hp_flags |= MV_HP_ERRATA_50XXB0;
                        break;
                case 0x3:
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                           "Applying B2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_50XXB2;
                        break;
                }
                break;

        case chip_604x:
        case chip_608x:
                hpriv->ops = &mv6xxx_ops;
                hp_flags |= MV_HP_GEN_II;

                switch (pdev->revision) {
                case 0x7:
                        hp_flags |= MV_HP_ERRATA_60X1B2;
                        break;
                case 0x9:
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "Applying B2 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_60X1B2;
                        break;
                }
                break;

        case chip_7042:
                hp_flags |= MV_HP_PCIE;
                if (pdev->vendor == PCI_VENDOR_ID_TTI &&
                    (pdev->device == 0x2300 || pdev->device == 0x2310))
                {
                        /*
                         * Highpoint RocketRAID PCIe 23xx series cards:
                         *
                         * Unconfigured drives are treated as "Legacy"
                         * by the BIOS, and it overwrites sector 8 with
                         * a "Lgcy" metadata block prior to Linux boot.
                         *
                         * Configured drives (RAID or JBOD) leave sector 8
                         * alone, but instead overwrite a high numbered
                         * sector for the RAID metadata.  This sector can
                         * be determined exactly, by truncating the physical
                         * drive capacity to a nice even GB value.
                         *
                         * RAID metadata is at: (dev->n_sectors & ~0xfffff)
                         *
                         * Warn the user, lest they think we're just buggy.
                         */
                        printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
                                " BIOS CORRUPTS DATA on all attached drives,"
                                " regardless of if/how they are configured."
                                " BEWARE!\n");
                        printk(KERN_WARNING DRV_NAME ": For data safety, do not"
                                " use sectors 8-9 on \"Legacy\" drives,"
                                " and avoid the final two gigabytes on"
                                " all RocketRAID BIOS initialized drives.\n");
                }
                /* fall through to chip_6042 */
        case chip_6042:
                hpriv->ops = &mv6xxx_ops;
                hp_flags |= MV_HP_GEN_IIE;

                switch (pdev->revision) {
                case 0x0:
                        hp_flags |= MV_HP_ERRATA_XX42A0;
                        break;
                case 0x1:
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                default:
                        dev_printk(KERN_WARNING, &pdev->dev,
                           "Applying 60X1C0 workarounds to unknown rev\n");
                        hp_flags |= MV_HP_ERRATA_60X1C0;
                        break;
                }
                break;
        case chip_soc:
                hpriv->ops = &mv_soc_ops;
                hp_flags |= MV_HP_ERRATA_60X1C0;
                break;

        default:
                dev_printk(KERN_ERR, host->dev,
                           "BUG: invalid board index %u\n", board_idx);
                return 1;
        }

        hpriv->hp_flags = hp_flags;
        if (hp_flags & MV_HP_PCIE) {
                hpriv->irq_cause_ofs    = PCIE_IRQ_CAUSE_OFS;
                hpriv->irq_mask_ofs     = PCIE_IRQ_MASK_OFS;
                hpriv->unmask_all_irqs  = PCIE_UNMASK_ALL_IRQS;
        } else {
                hpriv->irq_cause_ofs    = PCI_IRQ_CAUSE_OFS;
                hpriv->irq_mask_ofs     = PCI_IRQ_MASK_OFS;
                hpriv->unmask_all_irqs  = PCI_UNMASK_ALL_IRQS;
        }

        return 0;
}

/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
        int rc = 0, n_hc, port, hc;
        struct mv_host_priv *hpriv = host->private_data;
        void __iomem *mmio = hpriv->base;

        rc = mv_chip_id(host, board_idx);
        if (rc)
                goto done;

        if (HAS_PCI(host)) {
                hpriv->main_cause_reg_addr = hpriv->base +
                  HC_MAIN_IRQ_CAUSE_OFS;
                hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
        } else {
                hpriv->main_cause_reg_addr = hpriv->base +
                  HC_SOC_MAIN_IRQ_CAUSE_OFS;
                hpriv->main_mask_reg_addr = hpriv->base +
                  HC_SOC_MAIN_IRQ_MASK_OFS;
        }
        /* global interrupt mask */
        writel(0, hpriv->main_mask_reg_addr);

        n_hc = mv_get_hc_count(host->ports[0]->flags);

        for (port = 0; port < host->n_ports; port++)
                hpriv->ops->read_preamp(hpriv, port, mmio);

        rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
        if (rc)
                goto done;

        hpriv->ops->reset_flash(hpriv, mmio);
        hpriv->ops->reset_bus(host, mmio);
        hpriv->ops->enable_leds(hpriv, mmio);

        for (port = 0; port < host->n_ports; port++) {
                if (IS_GEN_II(hpriv)) {
                        void __iomem *port_mmio = mv_port_base(mmio, port);

                        u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
                        ifctl |= (1 << 7);              /* enable gen2i speed */
                        ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
                        writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
                }

                hpriv->ops->phy_errata(hpriv, mmio, port);
        }

        for (port = 0; port < host->n_ports; port++) {
                struct ata_port *ap = host->ports[port];
                void __iomem *port_mmio = mv_port_base(mmio, port);

                mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
                if (HAS_PCI(host)) {
                        unsigned int offset = port_mmio - mmio;
                        ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
                        ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
                }
#endif
        }

        for (hc = 0; hc < n_hc; hc++) {
                void __iomem *hc_mmio = mv_hc_base(mmio, hc);

                VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
                        "(before clear)=0x%08x\n", hc,
                        readl(hc_mmio + HC_CFG_OFS),
                        readl(hc_mmio + HC_IRQ_CAUSE_OFS));

                /* Clear any currently outstanding hc interrupt conditions */
                writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
        }

        if (HAS_PCI(host)) {
                /* Clear any currently outstanding host interrupt conditions */
                writelfl(0, mmio + hpriv->irq_cause_ofs);

                /* and unmask interrupt generation for host regs */
                writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
                if (IS_GEN_I(hpriv))
                        writelfl(~HC_MAIN_MASKED_IRQS_5,
                                 hpriv->main_mask_reg_addr);
                else
                        writelfl(~HC_MAIN_MASKED_IRQS,
                                 hpriv->main_mask_reg_addr);

                VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
                        "PCI int cause/mask=0x%08x/0x%08x\n",
                        readl(hpriv->main_cause_reg_addr),
                        readl(hpriv->main_mask_reg_addr),
                        readl(mmio + hpriv->irq_cause_ofs),
                        readl(mmio + hpriv->irq_mask_ofs));
        } else {
                writelfl(~HC_MAIN_MASKED_IRQS_SOC,
                         hpriv->main_mask_reg_addr);
                VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
                        readl(hpriv->main_cause_reg_addr),
                        readl(hpriv->main_mask_reg_addr));
        }
done:
        return rc;
}

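/*
 * mv_create_dma_pools - set up managed DMA pools for the command request
 * queue (CRQB), command response queue (CRPB) and scatter/gather tables.
 * The dmam_* allocations are released automatically on device detach.
 */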
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
        hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
                                                             MV_CRQB_Q_SZ, 0);
        if (!hpriv->crqb_pool)
                return -ENOMEM;

        hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
                                                             MV_CRPB_Q_SZ, 0);
        if (!hpriv->crpb_pool)
                return -ENOMEM;

        hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
                                                             MV_SG_TBL_SZ, 0);
        if (!hpriv->sg_tbl_pool)
                return -ENOMEM;

        return 0;
}

/**
 *      mv_platform_probe - handle a positive probe of an soc Marvell
 *      host
 *      @pdev: platform device found
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
        static int printed_version;
        const struct mv_sata_platform_data *mv_platform_data;
        const struct ata_port_info *ppi[] =
            { &mv_port_info[chip_soc], NULL };
        struct ata_host *host;
        struct mv_host_priv *hpriv;
        struct resource *res;
        int n_ports, rc;

        if (!printed_version++)
                dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

        /*
         * Simple resource validation ..
         */
        if (unlikely(pdev->num_resources != 2)) {
                dev_err(&pdev->dev, "invalid number of resources\n");
                return -EINVAL;
        }

        /*
         * Get the register base first
         */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (res == NULL)
                return -EINVAL;

        /* allocate host */
        mv_platform_data = pdev->dev.platform_data;
        n_ports = mv_platform_data->n_ports;

        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

        if (!host || !hpriv)
                return -ENOMEM;
        host->private_data = hpriv;
        hpriv->n_ports = n_ports;

        host->iomap = NULL;
        hpriv->base = devm_ioremap(&pdev->dev, res->start,
                                   res->end - res->start + 1);
        hpriv->base -= MV_SATAHC0_REG_BASE;

        rc = mv_create_dma_pools(hpriv, &pdev->dev);
        if (rc)
                return rc;

        /* initialize adapter */
        rc = mv_init_host(host, chip_soc);
        if (rc)
                return rc;

        dev_printk(KERN_INFO, &pdev->dev,
                   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
                   host->n_ports);

        return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
                                 IRQF_SHARED, &mv6_sht);
}

/**
 *      mv_platform_remove - unplug a platform interface
 *      @pdev: platform device
 *
 *      A platform bus SATA device has been unplugged. Perform the needed
 *      cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct ata_host *host = dev_get_drvdata(dev);

        ata_host_detach(host);
        return 0;
}

static struct platform_driver mv_platform_driver = {
        .probe                  = mv_platform_probe,
        .remove                 = __devexit_p(mv_platform_remove),
        .driver                 = {
                                   .name = DRV_NAME,
                                   .owner = THIS_MODULE,
                                  },
};

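/*
 * PCI probe support.  Everything from here through the end of
 * mv_pci_init_one() is PCI-only and sits under #ifdef CONFIG_PCI;
 * the chip_soc platform variant has no PCI interface.
 */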
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
                           const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = mv_pci_tbl,
        .probe                  = mv_pci_init_one,
        .remove                 = ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;       /* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
        int rc;

        if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (rc) {
                        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (rc) {
                                dev_printk(KERN_ERR, &pdev->dev,
                                           "64-bit DMA enable failed\n");
                                return rc;
                        }
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit DMA enable failed\n");
                        return rc;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit consistent DMA enable failed\n");
                        return rc;
                }
        }

        return rc;
}

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        struct mv_host_priv *hpriv = host->private_data;
        u8 scc;
        const char *scc_s, *gen;

        /* Use this to determine the HW stepping of the chip so we know
         * what errata to workaround
         */
        pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
        if (scc == 0)
                scc_s = "SCSI";
        else if (scc == 0x01)
                scc_s = "RAID";
        else
                scc_s = "?";

        if (IS_GEN_I(hpriv))
                gen = "I";
        else if (IS_GEN_II(hpriv))
                gen = "II";
        else if (IS_GEN_IIE(hpriv))
                gen = "IIE";
        else
                gen = "?";

        dev_printk(KERN_INFO, &pdev->dev,
               "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
               gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
               scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
                           const struct pci_device_id *ent)
{
        static int printed_version;
        unsigned int board_idx = (unsigned int)ent->driver_data;
        const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
        struct ata_host *host;
        struct mv_host_priv *hpriv;
        int n_ports, rc;

        if (!printed_version++)
                dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

        /* allocate host */
        n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
        hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
        if (!host || !hpriv)
                return -ENOMEM;
        host->private_data = hpriv;
        hpriv->n_ports = n_ports;

        /* acquire resources */
        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
        if (rc == -EBUSY)
                pcim_pin_device(pdev);
        if (rc)
                return rc;
        host->iomap = pcim_iomap_table(pdev);
        hpriv->base = host->iomap[MV_PRIMARY_BAR];

        rc = pci_go_64(pdev);
        if (rc)
                return rc;

        rc = mv_create_dma_pools(hpriv, &pdev->dev);
        if (rc)
                return rc;

        /* initialize adapter */
        rc = mv_init_host(host, board_idx);
        if (rc)
                return rc;

        /* Enable interrupts */
        if (msi && pci_enable_msi(pdev))
                pci_intx(pdev, 1);

        mv_dump_pci_cfg(pdev, 0x68);
        mv_print_info(host);

        pci_set_master(pdev);
        pci_try_set_mwi(pdev);
        return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
                                 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

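/*
 * Note on init ordering: the PCI driver is registered first; if the
 * subsequent platform driver registration fails, the PCI driver is
 * unregistered again so a failed module load leaves nothing behind.
 */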
static int __init mv_init(void)
{
        int rc = -ENODEV;
#ifdef CONFIG_PCI
        rc = pci_register_driver(&mv_pci_driver);
        if (rc < 0)
                return rc;
#endif
        rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
        if (rc < 0)
                pci_unregister_driver(&mv_pci_driver);
#endif
        return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
        pci_unregister_driver(&mv_pci_driver);
#endif
        platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sata_mv");

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);