2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
32 2) Improve/fix IRQ and error handling sequences.
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42 6) Add port multiplier support (intermediate)
44 8) Develop a low-power-consumption strategy, and implement it.
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is not
53 worth the latency cost.
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
82 #define DRV_NAME "sata_mv"
83 #define DRV_VERSION "1.20"
86 /* BAR's are enumerated in terms of pci_resource_start() terms */
87 MV_PRIMARY_BAR
= 0, /* offset 0x10: memory space */
88 MV_IO_BAR
= 2, /* offset 0x18: IO space */
89 MV_MISC_BAR
= 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
91 MV_MAJOR_REG_AREA_SZ
= 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ
= 0x2000, /* 8KB */
95 MV_IRQ_COAL_REG_BASE
= 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE
= (MV_IRQ_COAL_REG_BASE
+ 0x08),
97 MV_IRQ_COAL_CAUSE_LO
= (MV_IRQ_COAL_REG_BASE
+ 0x88),
98 MV_IRQ_COAL_CAUSE_HI
= (MV_IRQ_COAL_REG_BASE
+ 0x8c),
99 MV_IRQ_COAL_THRESHOLD
= (MV_IRQ_COAL_REG_BASE
+ 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD
= (MV_IRQ_COAL_REG_BASE
+ 0xd0),
102 MV_SATAHC0_REG_BASE
= 0x20000,
103 MV_FLASH_CTL
= 0x1046c,
104 MV_GPIO_PORT_CTL
= 0x104f0,
105 MV_RESET_CFG
= 0x180d8,
107 MV_PCI_REG_SZ
= MV_MAJOR_REG_AREA_SZ
,
108 MV_SATAHC_REG_SZ
= MV_MAJOR_REG_AREA_SZ
,
109 MV_SATAHC_ARBTR_REG_SZ
= MV_MINOR_REG_AREA_SZ
, /* arbiter */
110 MV_PORT_REG_SZ
= MV_MINOR_REG_AREA_SZ
,
113 MV_MAX_Q_DEPTH_MASK
= MV_MAX_Q_DEPTH
- 1,
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
119 MV_CRQB_Q_SZ
= (32 * MV_MAX_Q_DEPTH
),
120 MV_CRPB_Q_SZ
= (8 * MV_MAX_Q_DEPTH
),
122 MV_SG_TBL_SZ
= (16 * MV_MAX_SG_CT
),
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT
= 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
131 MV_FLAG_DUAL_HC
= (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE
= (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC
= (1 << 28),
136 MV_COMMON_FLAGS
= ATA_FLAG_SATA
| ATA_FLAG_NO_LEGACY
|
137 ATA_FLAG_MMIO
| ATA_FLAG_NO_ATAPI
|
138 ATA_FLAG_PIO_POLLING
,
139 MV_6XXX_FLAGS
= MV_FLAG_IRQ_COALESCE
,
141 CRQB_FLAG_READ
= (1 << 0),
143 CRQB_IOID_SHIFT
= 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT
= 17, /* CRQB Gen-II/IIE HostQueTag shift */
145 CRQB_CMD_ADDR_SHIFT
= 8,
146 CRQB_CMD_CS
= (0x2 << 11),
147 CRQB_CMD_LAST
= (1 << 15),
149 CRPB_FLAG_STATUS_SHIFT
= 8,
150 CRPB_IOID_SHIFT_6
= 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7
= 7, /* CRPB Gen-IIE IO Id shift */
153 EPRD_FLAG_END_OF_TBL
= (1 << 31),
155 /* PCI interface registers */
157 PCI_COMMAND_OFS
= 0xc00,
159 PCI_MAIN_CMD_STS_OFS
= 0xd30,
160 STOP_PCI_MASTER
= (1 << 2),
161 PCI_MASTER_EMPTY
= (1 << 3),
162 GLOB_SFT_RST
= (1 << 4),
165 MV_PCI_EXP_ROM_BAR_CTL
= 0xd2c,
166 MV_PCI_DISC_TIMER
= 0xd04,
167 MV_PCI_MSI_TRIGGER
= 0xc38,
168 MV_PCI_SERR_MASK
= 0xc28,
169 MV_PCI_XBAR_TMOUT
= 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS
= 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS
= 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE
= 0x1d48,
173 MV_PCI_ERR_COMMAND
= 0x1d50,
175 PCI_IRQ_CAUSE_OFS
= 0x1d58,
176 PCI_IRQ_MASK_OFS
= 0x1d5c,
177 PCI_UNMASK_ALL_IRQS
= 0x7fffff, /* bits 22-0 */
179 PCIE_IRQ_CAUSE_OFS
= 0x1900,
180 PCIE_IRQ_MASK_OFS
= 0x1910,
181 PCIE_UNMASK_ALL_IRQS
= 0x40a, /* assorted bits */
183 HC_MAIN_IRQ_CAUSE_OFS
= 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS
= 0x1d64,
185 HC_SOC_MAIN_IRQ_CAUSE_OFS
= 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS
= 0x20024,
187 PORT0_ERR
= (1 << 0), /* shift by port # */
188 PORT0_DONE
= (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND
= 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT
= 9, /* bits 9-17 = HC1's ports */
192 TRAN_LO_DONE
= (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE
= (1 << 20), /* 6xxx: IRQ coalescing */
194 PORTS_0_3_COAL_DONE
= (1 << 8),
195 PORTS_4_7_COAL_DONE
= (1 << 17),
196 PORTS_0_7_COAL_DONE
= (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT
= (1 << 22),
198 SELF_INT
= (1 << 23),
199 TWSI_INT
= (1 << 24),
200 HC_MAIN_RSVD
= (0x7f << 25), /* bits 31-25 */
201 HC_MAIN_RSVD_5
= (0x1fff << 19), /* bits 31-19 */
202 HC_MAIN_RSVD_SOC
= (0x3fffffb << 6), /* bits 31-9, 7-6 */
203 HC_MAIN_MASKED_IRQS
= (TRAN_LO_DONE
| TRAN_HI_DONE
|
204 PORTS_0_7_COAL_DONE
| GPIO_INT
| TWSI_INT
|
206 HC_MAIN_MASKED_IRQS_5
= (PORTS_0_3_COAL_DONE
| PORTS_4_7_COAL_DONE
|
208 HC_MAIN_MASKED_IRQS_SOC
= (PORTS_0_3_COAL_DONE
| HC_MAIN_RSVD_SOC
),
210 /* SATAHC registers */
213 HC_IRQ_CAUSE_OFS
= 0x14,
214 CRPB_DMA_DONE
= (1 << 0), /* shift by port # */
215 HC_IRQ_COAL
= (1 << 4), /* IRQ coalescing */
216 DEV_IRQ
= (1 << 8), /* shift by port # */
218 /* Shadow block registers */
220 SHD_CTL_AST_OFS
= 0x20, /* ofs from SHD_BLK_OFS */
223 SATA_STATUS_OFS
= 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS
= 0x350,
225 SATA_FIS_IRQ_CAUSE_OFS
= 0x364,
232 SATA_INTERFACE_CTL
= 0x050,
234 MV_M2_PREAMP_MASK
= 0x7e0,
238 EDMA_CFG_Q_DEPTH
= 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ
= (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR
= (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT
= (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN
= (1 << 13), /* write buffer 512B */
244 EDMA_ERR_IRQ_CAUSE_OFS
= 0x8,
245 EDMA_ERR_IRQ_MASK_OFS
= 0xc,
246 EDMA_ERR_D_PAR
= (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR
= (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV
= (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON
= (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON
= (1 << 4), /* device connected */
251 EDMA_ERR_SERR
= (1 << 5), /* SError bits [WBDST] raised */
252 EDMA_ERR_SELF_DIS
= (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5
= (1 << 8), /* Gen I self-disable */
254 EDMA_ERR_BIST_ASYNC
= (1 << 8), /* BIST FIS or Async Notify */
255 EDMA_ERR_TRANS_IRQ_7
= (1 << 8), /* Gen IIE transprt layer irq */
256 EDMA_ERR_CRQB_PAR
= (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR
= (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR
= (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY
= (1 << 12), /* IORdy timeout */
261 EDMA_ERR_LNK_CTRL_RX
= (0xf << 13), /* link ctrl rx error */
262 EDMA_ERR_LNK_CTRL_RX_0
= (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1
= (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2
= (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3
= (1 << 16), /* transient: FIS rx err */
267 EDMA_ERR_LNK_DATA_RX
= (0xf << 17), /* link data rx error */
269 EDMA_ERR_LNK_CTRL_TX
= (0x1f << 21), /* link ctrl tx error */
270 EDMA_ERR_LNK_CTRL_TX_0
= (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1
= (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2
= (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3
= (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4
= (1 << 25), /* transient: FIS collision */
276 EDMA_ERR_LNK_DATA_TX
= (0x1f << 26), /* link data tx error */
278 EDMA_ERR_TRANS_PROTO
= (1 << 31), /* transport protocol error */
279 EDMA_ERR_OVERRUN_5
= (1 << 5),
280 EDMA_ERR_UNDERRUN_5
= (1 << 6),
282 EDMA_ERR_IRQ_TRANSIENT
= EDMA_ERR_LNK_CTRL_RX_0
|
283 EDMA_ERR_LNK_CTRL_RX_1
|
284 EDMA_ERR_LNK_CTRL_RX_3
|
285 EDMA_ERR_LNK_CTRL_TX
,
287 EDMA_EH_FREEZE
= EDMA_ERR_D_PAR
|
297 EDMA_ERR_LNK_CTRL_RX_2
|
298 EDMA_ERR_LNK_DATA_RX
|
299 EDMA_ERR_LNK_DATA_TX
|
300 EDMA_ERR_TRANS_PROTO
,
301 EDMA_EH_FREEZE_5
= EDMA_ERR_D_PAR
|
306 EDMA_ERR_UNDERRUN_5
|
307 EDMA_ERR_SELF_DIS_5
|
313 EDMA_REQ_Q_BASE_HI_OFS
= 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS
= 0x14, /* also contains BASE_LO */
316 EDMA_REQ_Q_OUT_PTR_OFS
= 0x18,
317 EDMA_REQ_Q_PTR_SHIFT
= 5,
319 EDMA_RSP_Q_BASE_HI_OFS
= 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS
= 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS
= 0x24, /* also contains BASE_LO */
322 EDMA_RSP_Q_PTR_SHIFT
= 3,
324 EDMA_CMD_OFS
= 0x28, /* EDMA command register */
325 EDMA_EN
= (1 << 0), /* enable EDMA */
326 EDMA_DS
= (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST
= (1 << 2), /* reset trans/link/phy */
329 EDMA_IORDY_TMOUT
= 0x34,
332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI
= (1 << 0),
334 MV_HP_ERRATA_50XXB0
= (1 << 1),
335 MV_HP_ERRATA_50XXB2
= (1 << 2),
336 MV_HP_ERRATA_60X1B2
= (1 << 3),
337 MV_HP_ERRATA_60X1C0
= (1 << 4),
338 MV_HP_ERRATA_XX42A0
= (1 << 5),
339 MV_HP_GEN_I
= (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II
= (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE
= (1 << 8), /* Generation IIE: 6042/7042 */
342 MV_HP_PCIE
= (1 << 9), /* PCIe bus/regs: 7042 */
344 /* Port private flags (pp_flags) */
345 MV_PP_FLAG_EDMA_EN
= (1 << 0), /* is EDMA engine enabled? */
346 MV_PP_FLAG_NCQ_EN
= (1 << 1), /* is EDMA set up for NCQ? */
347 MV_PP_FLAG_HAD_A_RESET
= (1 << 2), /* 1st hard reset complete? */
350 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
353 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
356 /* DMA boundary 0xffff is required by the s/g splitting
357 * we need on /length/ in mv_fill-sg().
359 MV_DMA_BOUNDARY
= 0xffffU
,
361 /* mask of register bits containing lower 32 bits
362 * of EDMA request queue DMA address
364 EDMA_REQ_Q_BASE_LO_MASK
= 0xfffffc00U
,
366 /* ditto, for response queue */
367 EDMA_RSP_Q_BASE_LO_MASK
= 0xffffff00U
,
381 /* Command ReQuest Block: 32B */
397 /* Command ResPonse Block: 8B */
404 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
412 struct mv_port_priv
{
413 struct mv_crqb
*crqb
;
415 struct mv_crpb
*crpb
;
417 struct mv_sg
*sg_tbl
[MV_MAX_Q_DEPTH
];
418 dma_addr_t sg_tbl_dma
[MV_MAX_Q_DEPTH
];
420 unsigned int req_idx
;
421 unsigned int resp_idx
;
426 struct mv_port_signal
{
431 struct mv_host_priv
{
433 struct mv_port_signal signal
[8];
434 const struct mv_hw_ops
*ops
;
437 void __iomem
*main_cause_reg_addr
;
438 void __iomem
*main_mask_reg_addr
;
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
447 struct dma_pool
*crqb_pool
;
448 struct dma_pool
*crpb_pool
;
449 struct dma_pool
*sg_tbl_pool
;
453 void (*phy_errata
)(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
455 void (*enable_leds
)(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
456 void (*read_preamp
)(struct mv_host_priv
*hpriv
, int idx
,
458 int (*reset_hc
)(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
460 void (*reset_flash
)(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
461 void (*reset_bus
)(struct ata_host
*host
, void __iomem
*mmio
);
464 static int mv_scr_read(struct ata_port
*ap
, unsigned int sc_reg_in
, u32
*val
);
465 static int mv_scr_write(struct ata_port
*ap
, unsigned int sc_reg_in
, u32 val
);
466 static int mv5_scr_read(struct ata_port
*ap
, unsigned int sc_reg_in
, u32
*val
);
467 static int mv5_scr_write(struct ata_port
*ap
, unsigned int sc_reg_in
, u32 val
);
468 static int mv_port_start(struct ata_port
*ap
);
469 static void mv_port_stop(struct ata_port
*ap
);
470 static void mv_qc_prep(struct ata_queued_cmd
*qc
);
471 static void mv_qc_prep_iie(struct ata_queued_cmd
*qc
);
472 static unsigned int mv_qc_issue(struct ata_queued_cmd
*qc
);
473 static void mv_error_handler(struct ata_port
*ap
);
474 static void mv_eh_freeze(struct ata_port
*ap
);
475 static void mv_eh_thaw(struct ata_port
*ap
);
476 static void mv6_dev_config(struct ata_device
*dev
);
478 static void mv5_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
480 static void mv5_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
481 static void mv5_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
483 static int mv5_reset_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
485 static void mv5_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
486 static void mv5_reset_bus(struct ata_host
*host
, void __iomem
*mmio
);
488 static void mv6_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
490 static void mv6_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
491 static void mv6_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
493 static int mv6_reset_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
495 static void mv6_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
);
496 static void mv_soc_enable_leds(struct mv_host_priv
*hpriv
,
498 static void mv_soc_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
500 static int mv_soc_reset_hc(struct mv_host_priv
*hpriv
,
501 void __iomem
*mmio
, unsigned int n_hc
);
502 static void mv_soc_reset_flash(struct mv_host_priv
*hpriv
,
504 static void mv_soc_reset_bus(struct ata_host
*host
, void __iomem
*mmio
);
505 static void mv_reset_pci_bus(struct ata_host
*host
, void __iomem
*mmio
);
506 static void mv_channel_reset(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
507 unsigned int port_no
);
508 static void mv_edma_cfg(struct mv_port_priv
*pp
, struct mv_host_priv
*hpriv
,
509 void __iomem
*port_mmio
, int want_ncq
);
510 static int __mv_stop_dma(struct ata_port
*ap
);
512 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
513 * because we have to allow room for worst case splitting of
514 * PRDs for 64K boundaries in mv_fill_sg().
516 static struct scsi_host_template mv5_sht
= {
517 ATA_BASE_SHT(DRV_NAME
),
518 .sg_tablesize
= MV_MAX_SG_CT
/ 2,
519 .dma_boundary
= MV_DMA_BOUNDARY
,
522 static struct scsi_host_template mv6_sht
= {
523 ATA_NCQ_SHT(DRV_NAME
),
524 .can_queue
= MV_MAX_Q_DEPTH
- 1,
525 .sg_tablesize
= MV_MAX_SG_CT
/ 2,
526 .dma_boundary
= MV_DMA_BOUNDARY
,
529 static const struct ata_port_operations mv5_ops
= {
530 .tf_load
= ata_tf_load
,
531 .tf_read
= ata_tf_read
,
532 .check_status
= ata_check_status
,
533 .exec_command
= ata_exec_command
,
534 .dev_select
= ata_std_dev_select
,
536 .qc_prep
= mv_qc_prep
,
537 .qc_issue
= mv_qc_issue
,
538 .data_xfer
= ata_data_xfer
,
540 .irq_clear
= ata_noop_irq_clear
,
541 .irq_on
= ata_irq_on
,
543 .error_handler
= mv_error_handler
,
544 .freeze
= mv_eh_freeze
,
547 .scr_read
= mv5_scr_read
,
548 .scr_write
= mv5_scr_write
,
550 .port_start
= mv_port_start
,
551 .port_stop
= mv_port_stop
,
554 static const struct ata_port_operations mv6_ops
= {
555 .dev_config
= mv6_dev_config
,
556 .tf_load
= ata_tf_load
,
557 .tf_read
= ata_tf_read
,
558 .check_status
= ata_check_status
,
559 .exec_command
= ata_exec_command
,
560 .dev_select
= ata_std_dev_select
,
562 .qc_prep
= mv_qc_prep
,
563 .qc_issue
= mv_qc_issue
,
564 .data_xfer
= ata_data_xfer
,
566 .irq_clear
= ata_noop_irq_clear
,
567 .irq_on
= ata_irq_on
,
569 .error_handler
= mv_error_handler
,
570 .freeze
= mv_eh_freeze
,
572 .qc_defer
= ata_std_qc_defer
,
574 .scr_read
= mv_scr_read
,
575 .scr_write
= mv_scr_write
,
577 .port_start
= mv_port_start
,
578 .port_stop
= mv_port_stop
,
581 static const struct ata_port_operations mv_iie_ops
= {
582 .tf_load
= ata_tf_load
,
583 .tf_read
= ata_tf_read
,
584 .check_status
= ata_check_status
,
585 .exec_command
= ata_exec_command
,
586 .dev_select
= ata_std_dev_select
,
588 .qc_prep
= mv_qc_prep_iie
,
589 .qc_issue
= mv_qc_issue
,
590 .data_xfer
= ata_data_xfer
,
592 .irq_clear
= ata_noop_irq_clear
,
593 .irq_on
= ata_irq_on
,
595 .error_handler
= mv_error_handler
,
596 .freeze
= mv_eh_freeze
,
598 .qc_defer
= ata_std_qc_defer
,
600 .scr_read
= mv_scr_read
,
601 .scr_write
= mv_scr_write
,
603 .port_start
= mv_port_start
,
604 .port_stop
= mv_port_stop
,
607 static const struct ata_port_info mv_port_info
[] = {
609 .flags
= MV_COMMON_FLAGS
,
610 .pio_mask
= 0x1f, /* pio0-4 */
611 .udma_mask
= ATA_UDMA6
,
612 .port_ops
= &mv5_ops
,
615 .flags
= MV_COMMON_FLAGS
| MV_FLAG_DUAL_HC
,
616 .pio_mask
= 0x1f, /* pio0-4 */
617 .udma_mask
= ATA_UDMA6
,
618 .port_ops
= &mv5_ops
,
621 .flags
= MV_COMMON_FLAGS
| MV_FLAG_DUAL_HC
,
622 .pio_mask
= 0x1f, /* pio0-4 */
623 .udma_mask
= ATA_UDMA6
,
624 .port_ops
= &mv5_ops
,
627 .flags
= MV_COMMON_FLAGS
| MV_6XXX_FLAGS
|
629 .pio_mask
= 0x1f, /* pio0-4 */
630 .udma_mask
= ATA_UDMA6
,
631 .port_ops
= &mv6_ops
,
634 .flags
= MV_COMMON_FLAGS
| MV_6XXX_FLAGS
|
635 ATA_FLAG_NCQ
| MV_FLAG_DUAL_HC
,
636 .pio_mask
= 0x1f, /* pio0-4 */
637 .udma_mask
= ATA_UDMA6
,
638 .port_ops
= &mv6_ops
,
641 .flags
= MV_COMMON_FLAGS
| MV_6XXX_FLAGS
|
643 .pio_mask
= 0x1f, /* pio0-4 */
644 .udma_mask
= ATA_UDMA6
,
645 .port_ops
= &mv_iie_ops
,
648 .flags
= MV_COMMON_FLAGS
| MV_6XXX_FLAGS
|
650 .pio_mask
= 0x1f, /* pio0-4 */
651 .udma_mask
= ATA_UDMA6
,
652 .port_ops
= &mv_iie_ops
,
655 .flags
= MV_COMMON_FLAGS
| MV_FLAG_SOC
,
656 .pio_mask
= 0x1f, /* pio0-4 */
657 .udma_mask
= ATA_UDMA6
,
658 .port_ops
= &mv_iie_ops
,
662 static const struct pci_device_id mv_pci_tbl
[] = {
663 { PCI_VDEVICE(MARVELL
, 0x5040), chip_504x
},
664 { PCI_VDEVICE(MARVELL
, 0x5041), chip_504x
},
665 { PCI_VDEVICE(MARVELL
, 0x5080), chip_5080
},
666 { PCI_VDEVICE(MARVELL
, 0x5081), chip_508x
},
667 /* RocketRAID 1740/174x have different identifiers */
668 { PCI_VDEVICE(TTI
, 0x1740), chip_508x
},
669 { PCI_VDEVICE(TTI
, 0x1742), chip_508x
},
671 { PCI_VDEVICE(MARVELL
, 0x6040), chip_604x
},
672 { PCI_VDEVICE(MARVELL
, 0x6041), chip_604x
},
673 { PCI_VDEVICE(MARVELL
, 0x6042), chip_6042
},
674 { PCI_VDEVICE(MARVELL
, 0x6080), chip_608x
},
675 { PCI_VDEVICE(MARVELL
, 0x6081), chip_608x
},
677 { PCI_VDEVICE(ADAPTEC2
, 0x0241), chip_604x
},
680 { PCI_VDEVICE(ADAPTEC2
, 0x0243), chip_7042
},
682 /* Marvell 7042 support */
683 { PCI_VDEVICE(MARVELL
, 0x7042), chip_7042
},
685 /* Highpoint RocketRAID PCIe series */
686 { PCI_VDEVICE(TTI
, 0x2300), chip_7042
},
687 { PCI_VDEVICE(TTI
, 0x2310), chip_7042
},
689 { } /* terminate list */
692 static const struct mv_hw_ops mv5xxx_ops
= {
693 .phy_errata
= mv5_phy_errata
,
694 .enable_leds
= mv5_enable_leds
,
695 .read_preamp
= mv5_read_preamp
,
696 .reset_hc
= mv5_reset_hc
,
697 .reset_flash
= mv5_reset_flash
,
698 .reset_bus
= mv5_reset_bus
,
701 static const struct mv_hw_ops mv6xxx_ops
= {
702 .phy_errata
= mv6_phy_errata
,
703 .enable_leds
= mv6_enable_leds
,
704 .read_preamp
= mv6_read_preamp
,
705 .reset_hc
= mv6_reset_hc
,
706 .reset_flash
= mv6_reset_flash
,
707 .reset_bus
= mv_reset_pci_bus
,
710 static const struct mv_hw_ops mv_soc_ops
= {
711 .phy_errata
= mv6_phy_errata
,
712 .enable_leds
= mv_soc_enable_leds
,
713 .read_preamp
= mv_soc_read_preamp
,
714 .reset_hc
= mv_soc_reset_hc
,
715 .reset_flash
= mv_soc_reset_flash
,
716 .reset_bus
= mv_soc_reset_bus
,
723 static inline void writelfl(unsigned long data
, void __iomem
*addr
)
726 (void) readl(addr
); /* flush to avoid PCI posted write */
729 static inline void __iomem
*mv_hc_base(void __iomem
*base
, unsigned int hc
)
731 return (base
+ MV_SATAHC0_REG_BASE
+ (hc
* MV_SATAHC_REG_SZ
));
734 static inline unsigned int mv_hc_from_port(unsigned int port
)
736 return port
>> MV_PORT_HC_SHIFT
;
739 static inline unsigned int mv_hardport_from_port(unsigned int port
)
741 return port
& MV_PORT_MASK
;
744 static inline void __iomem
*mv_hc_base_from_port(void __iomem
*base
,
747 return mv_hc_base(base
, mv_hc_from_port(port
));
750 static inline void __iomem
*mv_port_base(void __iomem
*base
, unsigned int port
)
752 return mv_hc_base_from_port(base
, port
) +
753 MV_SATAHC_ARBTR_REG_SZ
+
754 (mv_hardport_from_port(port
) * MV_PORT_REG_SZ
);
757 static inline void __iomem
*mv_host_base(struct ata_host
*host
)
759 struct mv_host_priv
*hpriv
= host
->private_data
;
763 static inline void __iomem
*mv_ap_base(struct ata_port
*ap
)
765 return mv_port_base(mv_host_base(ap
->host
), ap
->port_no
);
768 static inline int mv_get_hc_count(unsigned long port_flags
)
770 return ((port_flags
& MV_FLAG_DUAL_HC
) ? 2 : 1);
773 static void mv_set_edma_ptrs(void __iomem
*port_mmio
,
774 struct mv_host_priv
*hpriv
,
775 struct mv_port_priv
*pp
)
780 * initialize request queue
782 index
= (pp
->req_idx
& MV_MAX_Q_DEPTH_MASK
) << EDMA_REQ_Q_PTR_SHIFT
;
784 WARN_ON(pp
->crqb_dma
& 0x3ff);
785 writel((pp
->crqb_dma
>> 16) >> 16, port_mmio
+ EDMA_REQ_Q_BASE_HI_OFS
);
786 writelfl((pp
->crqb_dma
& EDMA_REQ_Q_BASE_LO_MASK
) | index
,
787 port_mmio
+ EDMA_REQ_Q_IN_PTR_OFS
);
789 if (hpriv
->hp_flags
& MV_HP_ERRATA_XX42A0
)
790 writelfl((pp
->crqb_dma
& 0xffffffff) | index
,
791 port_mmio
+ EDMA_REQ_Q_OUT_PTR_OFS
);
793 writelfl(index
, port_mmio
+ EDMA_REQ_Q_OUT_PTR_OFS
);
796 * initialize response queue
798 index
= (pp
->resp_idx
& MV_MAX_Q_DEPTH_MASK
) << EDMA_RSP_Q_PTR_SHIFT
;
800 WARN_ON(pp
->crpb_dma
& 0xff);
801 writel((pp
->crpb_dma
>> 16) >> 16, port_mmio
+ EDMA_RSP_Q_BASE_HI_OFS
);
803 if (hpriv
->hp_flags
& MV_HP_ERRATA_XX42A0
)
804 writelfl((pp
->crpb_dma
& 0xffffffff) | index
,
805 port_mmio
+ EDMA_RSP_Q_IN_PTR_OFS
);
807 writelfl(index
, port_mmio
+ EDMA_RSP_Q_IN_PTR_OFS
);
809 writelfl((pp
->crpb_dma
& EDMA_RSP_Q_BASE_LO_MASK
) | index
,
810 port_mmio
+ EDMA_RSP_Q_OUT_PTR_OFS
);
814 * mv_start_dma - Enable eDMA engine
815 * @base: port base address
816 * @pp: port private data
818 * Verify the local cache of the eDMA state is accurate with a
822 * Inherited from caller.
824 static void mv_start_dma(struct ata_port
*ap
, void __iomem
*port_mmio
,
825 struct mv_port_priv
*pp
, u8 protocol
)
827 int want_ncq
= (protocol
== ATA_PROT_NCQ
);
829 if (pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
) {
830 int using_ncq
= ((pp
->pp_flags
& MV_PP_FLAG_NCQ_EN
) != 0);
831 if (want_ncq
!= using_ncq
)
834 if (!(pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
)) {
835 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
836 int hard_port
= mv_hardport_from_port(ap
->port_no
);
837 void __iomem
*hc_mmio
= mv_hc_base_from_port(
838 mv_host_base(ap
->host
), hard_port
);
839 u32 hc_irq_cause
, ipending
;
841 /* clear EDMA event indicators, if any */
842 writelfl(0, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
844 /* clear EDMA interrupt indicator, if any */
845 hc_irq_cause
= readl(hc_mmio
+ HC_IRQ_CAUSE_OFS
);
846 ipending
= (DEV_IRQ
<< hard_port
) |
847 (CRPB_DMA_DONE
<< hard_port
);
848 if (hc_irq_cause
& ipending
) {
849 writelfl(hc_irq_cause
& ~ipending
,
850 hc_mmio
+ HC_IRQ_CAUSE_OFS
);
853 mv_edma_cfg(pp
, hpriv
, port_mmio
, want_ncq
);
855 /* clear FIS IRQ Cause */
856 writelfl(0, port_mmio
+ SATA_FIS_IRQ_CAUSE_OFS
);
858 mv_set_edma_ptrs(port_mmio
, hpriv
, pp
);
860 writelfl(EDMA_EN
, port_mmio
+ EDMA_CMD_OFS
);
861 pp
->pp_flags
|= MV_PP_FLAG_EDMA_EN
;
863 WARN_ON(!(EDMA_EN
& readl(port_mmio
+ EDMA_CMD_OFS
)));
867 * __mv_stop_dma - Disable eDMA engine
868 * @ap: ATA channel to manipulate
870 * Verify the local cache of the eDMA state is accurate with a
874 * Inherited from caller.
876 static int __mv_stop_dma(struct ata_port
*ap
)
878 void __iomem
*port_mmio
= mv_ap_base(ap
);
879 struct mv_port_priv
*pp
= ap
->private_data
;
883 if (pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
) {
884 /* Disable EDMA if active. The disable bit auto clears.
886 writelfl(EDMA_DS
, port_mmio
+ EDMA_CMD_OFS
);
887 pp
->pp_flags
&= ~MV_PP_FLAG_EDMA_EN
;
889 WARN_ON(EDMA_EN
& readl(port_mmio
+ EDMA_CMD_OFS
));
892 /* now properly wait for the eDMA to stop */
893 for (i
= 1000; i
> 0; i
--) {
894 reg
= readl(port_mmio
+ EDMA_CMD_OFS
);
895 if (!(reg
& EDMA_EN
))
902 ata_port_printk(ap
, KERN_ERR
, "Unable to stop eDMA\n");
909 static int mv_stop_dma(struct ata_port
*ap
)
914 spin_lock_irqsave(&ap
->host
->lock
, flags
);
915 rc
= __mv_stop_dma(ap
);
916 spin_unlock_irqrestore(&ap
->host
->lock
, flags
);
922 static void mv_dump_mem(void __iomem
*start
, unsigned bytes
)
925 for (b
= 0; b
< bytes
; ) {
926 DPRINTK("%p: ", start
+ b
);
927 for (w
= 0; b
< bytes
&& w
< 4; w
++) {
928 printk("%08x ", readl(start
+ b
));
936 static void mv_dump_pci_cfg(struct pci_dev
*pdev
, unsigned bytes
)
941 for (b
= 0; b
< bytes
; ) {
942 DPRINTK("%02x: ", b
);
943 for (w
= 0; b
< bytes
&& w
< 4; w
++) {
944 (void) pci_read_config_dword(pdev
, b
, &dw
);
952 static void mv_dump_all_regs(void __iomem
*mmio_base
, int port
,
953 struct pci_dev
*pdev
)
956 void __iomem
*hc_base
= mv_hc_base(mmio_base
,
957 port
>> MV_PORT_HC_SHIFT
);
958 void __iomem
*port_base
;
959 int start_port
, num_ports
, p
, start_hc
, num_hcs
, hc
;
962 start_hc
= start_port
= 0;
963 num_ports
= 8; /* shld be benign for 4 port devs */
966 start_hc
= port
>> MV_PORT_HC_SHIFT
;
968 num_ports
= num_hcs
= 1;
970 DPRINTK("All registers for port(s) %u-%u:\n", start_port
,
971 num_ports
> 1 ? num_ports
- 1 : start_port
);
974 DPRINTK("PCI config space regs:\n");
975 mv_dump_pci_cfg(pdev
, 0x68);
977 DPRINTK("PCI regs:\n");
978 mv_dump_mem(mmio_base
+0xc00, 0x3c);
979 mv_dump_mem(mmio_base
+0xd00, 0x34);
980 mv_dump_mem(mmio_base
+0xf00, 0x4);
981 mv_dump_mem(mmio_base
+0x1d00, 0x6c);
982 for (hc
= start_hc
; hc
< start_hc
+ num_hcs
; hc
++) {
983 hc_base
= mv_hc_base(mmio_base
, hc
);
984 DPRINTK("HC regs (HC %i):\n", hc
);
985 mv_dump_mem(hc_base
, 0x1c);
987 for (p
= start_port
; p
< start_port
+ num_ports
; p
++) {
988 port_base
= mv_port_base(mmio_base
, p
);
989 DPRINTK("EDMA regs (port %i):\n", p
);
990 mv_dump_mem(port_base
, 0x54);
991 DPRINTK("SATA regs (port %i):\n", p
);
992 mv_dump_mem(port_base
+0x300, 0x60);
997 static unsigned int mv_scr_offset(unsigned int sc_reg_in
)
1001 switch (sc_reg_in
) {
1005 ofs
= SATA_STATUS_OFS
+ (sc_reg_in
* sizeof(u32
));
1008 ofs
= SATA_ACTIVE_OFS
; /* active is not with the others */
1017 static int mv_scr_read(struct ata_port
*ap
, unsigned int sc_reg_in
, u32
*val
)
1019 unsigned int ofs
= mv_scr_offset(sc_reg_in
);
1021 if (ofs
!= 0xffffffffU
) {
1022 *val
= readl(mv_ap_base(ap
) + ofs
);
1028 static int mv_scr_write(struct ata_port
*ap
, unsigned int sc_reg_in
, u32 val
)
1030 unsigned int ofs
= mv_scr_offset(sc_reg_in
);
1032 if (ofs
!= 0xffffffffU
) {
1033 writelfl(val
, mv_ap_base(ap
) + ofs
);
1039 static void mv6_dev_config(struct ata_device
*adev
)
1042 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1043 * See mv_qc_prep() for more info.
1045 if (adev
->flags
& ATA_DFLAG_NCQ
)
1046 if (adev
->max_sectors
> ATA_MAX_SECTORS
)
1047 adev
->max_sectors
= ATA_MAX_SECTORS
;
1050 static void mv_edma_cfg(struct mv_port_priv
*pp
, struct mv_host_priv
*hpriv
,
1051 void __iomem
*port_mmio
, int want_ncq
)
1055 /* set up non-NCQ EDMA configuration */
1056 cfg
= EDMA_CFG_Q_DEPTH
; /* always 0x1f for *all* chips */
1058 if (IS_GEN_I(hpriv
))
1059 cfg
|= (1 << 8); /* enab config burst size mask */
1061 else if (IS_GEN_II(hpriv
))
1062 cfg
|= EDMA_CFG_RD_BRST_EXT
| EDMA_CFG_WR_BUFF_LEN
;
1064 else if (IS_GEN_IIE(hpriv
)) {
1065 cfg
|= (1 << 23); /* do not mask PM field in rx'd FIS */
1066 cfg
|= (1 << 22); /* enab 4-entry host queue cache */
1067 cfg
|= (1 << 18); /* enab early completion */
1068 cfg
|= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1072 cfg
|= EDMA_CFG_NCQ
;
1073 pp
->pp_flags
|= MV_PP_FLAG_NCQ_EN
;
1075 pp
->pp_flags
&= ~MV_PP_FLAG_NCQ_EN
;
1077 writelfl(cfg
, port_mmio
+ EDMA_CFG_OFS
);
1080 static void mv_port_free_dma_mem(struct ata_port
*ap
)
1082 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1083 struct mv_port_priv
*pp
= ap
->private_data
;
1087 dma_pool_free(hpriv
->crqb_pool
, pp
->crqb
, pp
->crqb_dma
);
1091 dma_pool_free(hpriv
->crpb_pool
, pp
->crpb
, pp
->crpb_dma
);
1095 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1096 * For later hardware, we have one unique sg_tbl per NCQ tag.
1098 for (tag
= 0; tag
< MV_MAX_Q_DEPTH
; ++tag
) {
1099 if (pp
->sg_tbl
[tag
]) {
1100 if (tag
== 0 || !IS_GEN_I(hpriv
))
1101 dma_pool_free(hpriv
->sg_tbl_pool
,
1103 pp
->sg_tbl_dma
[tag
]);
1104 pp
->sg_tbl
[tag
] = NULL
;
1110 * mv_port_start - Port specific init/start routine.
1111 * @ap: ATA channel to manipulate
1113 * Allocate and point to DMA memory, init port private memory,
1117 * Inherited from caller.
1119 static int mv_port_start(struct ata_port
*ap
)
1121 struct device
*dev
= ap
->host
->dev
;
1122 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1123 struct mv_port_priv
*pp
;
1124 void __iomem
*port_mmio
= mv_ap_base(ap
);
1125 unsigned long flags
;
1128 pp
= devm_kzalloc(dev
, sizeof(*pp
), GFP_KERNEL
);
1131 ap
->private_data
= pp
;
1133 pp
->crqb
= dma_pool_alloc(hpriv
->crqb_pool
, GFP_KERNEL
, &pp
->crqb_dma
);
1136 memset(pp
->crqb
, 0, MV_CRQB_Q_SZ
);
1138 pp
->crpb
= dma_pool_alloc(hpriv
->crpb_pool
, GFP_KERNEL
, &pp
->crpb_dma
);
1140 goto out_port_free_dma_mem
;
1141 memset(pp
->crpb
, 0, MV_CRPB_Q_SZ
);
1144 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1145 * For later hardware, we need one unique sg_tbl per NCQ tag.
1147 for (tag
= 0; tag
< MV_MAX_Q_DEPTH
; ++tag
) {
1148 if (tag
== 0 || !IS_GEN_I(hpriv
)) {
1149 pp
->sg_tbl
[tag
] = dma_pool_alloc(hpriv
->sg_tbl_pool
,
1150 GFP_KERNEL
, &pp
->sg_tbl_dma
[tag
]);
1151 if (!pp
->sg_tbl
[tag
])
1152 goto out_port_free_dma_mem
;
1154 pp
->sg_tbl
[tag
] = pp
->sg_tbl
[0];
1155 pp
->sg_tbl_dma
[tag
] = pp
->sg_tbl_dma
[0];
1159 spin_lock_irqsave(&ap
->host
->lock
, flags
);
1161 mv_edma_cfg(pp
, hpriv
, port_mmio
, 0);
1162 mv_set_edma_ptrs(port_mmio
, hpriv
, pp
);
1164 spin_unlock_irqrestore(&ap
->host
->lock
, flags
);
1166 /* Don't turn on EDMA here...do it before DMA commands only. Else
1167 * we'll be unable to send non-data, PIO, etc due to restricted access
1172 out_port_free_dma_mem
:
1173 mv_port_free_dma_mem(ap
);
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	/* NOTE(review): the DMA-stop call was lost in the mangled source;
	 * restored to match the "Stop DMA" contract documented above.
	 */
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
1193 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1194 * @qc: queued command whose SG list to source from
1196 * Populate the SG list and mark the last entry.
1199 * Inherited from caller.
1201 static void mv_fill_sg(struct ata_queued_cmd
*qc
)
1203 struct mv_port_priv
*pp
= qc
->ap
->private_data
;
1204 struct scatterlist
*sg
;
1205 struct mv_sg
*mv_sg
, *last_sg
= NULL
;
1208 mv_sg
= pp
->sg_tbl
[qc
->tag
];
1209 for_each_sg(qc
->sg
, sg
, qc
->n_elem
, si
) {
1210 dma_addr_t addr
= sg_dma_address(sg
);
1211 u32 sg_len
= sg_dma_len(sg
);
1214 u32 offset
= addr
& 0xffff;
1217 if ((offset
+ sg_len
> 0x10000))
1218 len
= 0x10000 - offset
;
1220 mv_sg
->addr
= cpu_to_le32(addr
& 0xffffffff);
1221 mv_sg
->addr_hi
= cpu_to_le32((addr
>> 16) >> 16);
1222 mv_sg
->flags_size
= cpu_to_le32(len
& 0xffff);
1232 if (likely(last_sg
))
1233 last_sg
->flags_size
|= cpu_to_le32(EPRD_FLAG_END_OF_TBL
);
1236 static void mv_crqb_pack_cmd(__le16
*cmdw
, u8 data
, u8 addr
, unsigned last
)
1238 u16 tmp
= data
| (addr
<< CRQB_CMD_ADDR_SHIFT
) | CRQB_CMD_CS
|
1239 (last
? CRQB_CMD_LAST
: 0);
1240 *cmdw
= cpu_to_le16(tmp
);
1244 * mv_qc_prep - Host specific command preparation.
1245 * @qc: queued command to prepare
1247 * This routine simply redirects to the general purpose routine
1248 * if command is not DMA. Else, it handles prep of the CRQB
1249 * (command request block), does some sanity checking, and calls
1250 * the SG load routine.
1253 * Inherited from caller.
1255 static void mv_qc_prep(struct ata_queued_cmd
*qc
)
1257 struct ata_port
*ap
= qc
->ap
;
1258 struct mv_port_priv
*pp
= ap
->private_data
;
1260 struct ata_taskfile
*tf
;
1264 if ((qc
->tf
.protocol
!= ATA_PROT_DMA
) &&
1265 (qc
->tf
.protocol
!= ATA_PROT_NCQ
))
1268 /* Fill in command request block
1270 if (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
))
1271 flags
|= CRQB_FLAG_READ
;
1272 WARN_ON(MV_MAX_Q_DEPTH
<= qc
->tag
);
1273 flags
|= qc
->tag
<< CRQB_TAG_SHIFT
;
1275 /* get current queue index from software */
1276 in_index
= pp
->req_idx
& MV_MAX_Q_DEPTH_MASK
;
1278 pp
->crqb
[in_index
].sg_addr
=
1279 cpu_to_le32(pp
->sg_tbl_dma
[qc
->tag
] & 0xffffffff);
1280 pp
->crqb
[in_index
].sg_addr_hi
=
1281 cpu_to_le32((pp
->sg_tbl_dma
[qc
->tag
] >> 16) >> 16);
1282 pp
->crqb
[in_index
].ctrl_flags
= cpu_to_le16(flags
);
1284 cw
= &pp
->crqb
[in_index
].ata_cmd
[0];
1287 /* Sadly, the CRQB cannot accomodate all registers--there are
1288 * only 11 bytes...so we must pick and choose required
1289 * registers based on the command. So, we drop feature and
1290 * hob_feature for [RW] DMA commands, but they are needed for
1291 * NCQ. NCQ will drop hob_nsect.
1293 switch (tf
->command
) {
1295 case ATA_CMD_READ_EXT
:
1297 case ATA_CMD_WRITE_EXT
:
1298 case ATA_CMD_WRITE_FUA_EXT
:
1299 mv_crqb_pack_cmd(cw
++, tf
->hob_nsect
, ATA_REG_NSECT
, 0);
1301 case ATA_CMD_FPDMA_READ
:
1302 case ATA_CMD_FPDMA_WRITE
:
1303 mv_crqb_pack_cmd(cw
++, tf
->hob_feature
, ATA_REG_FEATURE
, 0);
1304 mv_crqb_pack_cmd(cw
++, tf
->feature
, ATA_REG_FEATURE
, 0);
1307 /* The only other commands EDMA supports in non-queued and
1308 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1309 * of which are defined/used by Linux. If we get here, this
1310 * driver needs work.
1312 * FIXME: modify libata to give qc_prep a return value and
1313 * return error here.
1315 BUG_ON(tf
->command
);
1318 mv_crqb_pack_cmd(cw
++, tf
->nsect
, ATA_REG_NSECT
, 0);
1319 mv_crqb_pack_cmd(cw
++, tf
->hob_lbal
, ATA_REG_LBAL
, 0);
1320 mv_crqb_pack_cmd(cw
++, tf
->lbal
, ATA_REG_LBAL
, 0);
1321 mv_crqb_pack_cmd(cw
++, tf
->hob_lbam
, ATA_REG_LBAM
, 0);
1322 mv_crqb_pack_cmd(cw
++, tf
->lbam
, ATA_REG_LBAM
, 0);
1323 mv_crqb_pack_cmd(cw
++, tf
->hob_lbah
, ATA_REG_LBAH
, 0);
1324 mv_crqb_pack_cmd(cw
++, tf
->lbah
, ATA_REG_LBAH
, 0);
1325 mv_crqb_pack_cmd(cw
++, tf
->device
, ATA_REG_DEVICE
, 0);
1326 mv_crqb_pack_cmd(cw
++, tf
->command
, ATA_REG_CMD
, 1); /* last */
1328 if (!(qc
->flags
& ATA_QCFLAG_DMAMAP
))
1334 * mv_qc_prep_iie - Host specific command preparation.
1335 * @qc: queued command to prepare
1337 * This routine simply redirects to the general purpose routine
1338 * if command is not DMA. Else, it handles prep of the CRQB
1339 * (command request block), does some sanity checking, and calls
1340 * the SG load routine.
1343 * Inherited from caller.
1345 static void mv_qc_prep_iie(struct ata_queued_cmd
*qc
)
1347 struct ata_port
*ap
= qc
->ap
;
1348 struct mv_port_priv
*pp
= ap
->private_data
;
1349 struct mv_crqb_iie
*crqb
;
1350 struct ata_taskfile
*tf
;
1354 if ((qc
->tf
.protocol
!= ATA_PROT_DMA
) &&
1355 (qc
->tf
.protocol
!= ATA_PROT_NCQ
))
1358 /* Fill in Gen IIE command request block
1360 if (!(qc
->tf
.flags
& ATA_TFLAG_WRITE
))
1361 flags
|= CRQB_FLAG_READ
;
1363 WARN_ON(MV_MAX_Q_DEPTH
<= qc
->tag
);
1364 flags
|= qc
->tag
<< CRQB_TAG_SHIFT
;
1365 flags
|= qc
->tag
<< CRQB_HOSTQ_SHIFT
;
1367 /* get current queue index from software */
1368 in_index
= pp
->req_idx
& MV_MAX_Q_DEPTH_MASK
;
1370 crqb
= (struct mv_crqb_iie
*) &pp
->crqb
[in_index
];
1371 crqb
->addr
= cpu_to_le32(pp
->sg_tbl_dma
[qc
->tag
] & 0xffffffff);
1372 crqb
->addr_hi
= cpu_to_le32((pp
->sg_tbl_dma
[qc
->tag
] >> 16) >> 16);
1373 crqb
->flags
= cpu_to_le32(flags
);
1376 crqb
->ata_cmd
[0] = cpu_to_le32(
1377 (tf
->command
<< 16) |
1380 crqb
->ata_cmd
[1] = cpu_to_le32(
1386 crqb
->ata_cmd
[2] = cpu_to_le32(
1387 (tf
->hob_lbal
<< 0) |
1388 (tf
->hob_lbam
<< 8) |
1389 (tf
->hob_lbah
<< 16) |
1390 (tf
->hob_feature
<< 24)
1392 crqb
->ata_cmd
[3] = cpu_to_le32(
1394 (tf
->hob_nsect
<< 8)
1397 if (!(qc
->flags
& ATA_QCFLAG_DMAMAP
))
1403 * mv_qc_issue - Initiate a command to the host
1404 * @qc: queued command to start
1406 * This routine simply redirects to the general purpose routine
1407 * if command is not DMA. Else, it sanity checks our local
1408 * caches of the request producer/consumer indices then enables
1409 * DMA and bumps the request producer index.
1412 * Inherited from caller.
1414 static unsigned int mv_qc_issue(struct ata_queued_cmd
*qc
)
1416 struct ata_port
*ap
= qc
->ap
;
1417 void __iomem
*port_mmio
= mv_ap_base(ap
);
1418 struct mv_port_priv
*pp
= ap
->private_data
;
1421 if ((qc
->tf
.protocol
!= ATA_PROT_DMA
) &&
1422 (qc
->tf
.protocol
!= ATA_PROT_NCQ
)) {
1423 /* We're about to send a non-EDMA capable command to the
1424 * port. Turn off EDMA so there won't be problems accessing
1425 * shadow block, etc registers.
1428 return ata_qc_issue_prot(qc
);
1431 mv_start_dma(ap
, port_mmio
, pp
, qc
->tf
.protocol
);
1435 in_index
= (pp
->req_idx
& MV_MAX_Q_DEPTH_MASK
) << EDMA_REQ_Q_PTR_SHIFT
;
1437 /* and write the request in pointer to kick the EDMA to life */
1438 writelfl((pp
->crqb_dma
& EDMA_REQ_Q_BASE_LO_MASK
) | in_index
,
1439 port_mmio
+ EDMA_REQ_Q_IN_PTR_OFS
);
1445 * mv_err_intr - Handle error interrupts on the port
1446 * @ap: ATA channel to manipulate
1447 * @reset_allowed: bool: 0 == don't trigger from reset here
1449 * In most cases, just clear the interrupt and move on. However,
1450 * some cases require an eDMA reset, which is done right before
1451 * the COMRESET in mv_phy_reset(). The SERR case requires a
1452 * clear of pending errors in the SATA SERROR register. Finally,
1453 * if the port disabled DMA, update our cached copy to match.
1456 * Inherited from caller.
1458 static void mv_err_intr(struct ata_port
*ap
, struct ata_queued_cmd
*qc
)
1460 void __iomem
*port_mmio
= mv_ap_base(ap
);
1461 u32 edma_err_cause
, eh_freeze_mask
, serr
= 0;
1462 struct mv_port_priv
*pp
= ap
->private_data
;
1463 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1464 unsigned int edma_enabled
= (pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
);
1465 unsigned int action
= 0, err_mask
= 0;
1466 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
1468 ata_ehi_clear_desc(ehi
);
1470 if (!edma_enabled
) {
1471 /* just a guess: do we need to do this? should we
1472 * expand this, and do it in all cases?
1474 sata_scr_read(&ap
->link
, SCR_ERROR
, &serr
);
1475 sata_scr_write_flush(&ap
->link
, SCR_ERROR
, serr
);
1478 edma_err_cause
= readl(port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
1480 ata_ehi_push_desc(ehi
, "edma_err 0x%08x", edma_err_cause
);
1483 * all generations share these EDMA error cause bits
1486 if (edma_err_cause
& EDMA_ERR_DEV
)
1487 err_mask
|= AC_ERR_DEV
;
1488 if (edma_err_cause
& (EDMA_ERR_D_PAR
| EDMA_ERR_PRD_PAR
|
1489 EDMA_ERR_CRQB_PAR
| EDMA_ERR_CRPB_PAR
|
1490 EDMA_ERR_INTRL_PAR
)) {
1491 err_mask
|= AC_ERR_ATA_BUS
;
1492 action
|= ATA_EH_RESET
;
1493 ata_ehi_push_desc(ehi
, "parity error");
1495 if (edma_err_cause
& (EDMA_ERR_DEV_DCON
| EDMA_ERR_DEV_CON
)) {
1496 ata_ehi_hotplugged(ehi
);
1497 ata_ehi_push_desc(ehi
, edma_err_cause
& EDMA_ERR_DEV_DCON
?
1498 "dev disconnect" : "dev connect");
1499 action
|= ATA_EH_RESET
;
1502 if (IS_GEN_I(hpriv
)) {
1503 eh_freeze_mask
= EDMA_EH_FREEZE_5
;
1505 if (edma_err_cause
& EDMA_ERR_SELF_DIS_5
) {
1506 pp
= ap
->private_data
;
1507 pp
->pp_flags
&= ~MV_PP_FLAG_EDMA_EN
;
1508 ata_ehi_push_desc(ehi
, "EDMA self-disable");
1511 eh_freeze_mask
= EDMA_EH_FREEZE
;
1513 if (edma_err_cause
& EDMA_ERR_SELF_DIS
) {
1514 pp
= ap
->private_data
;
1515 pp
->pp_flags
&= ~MV_PP_FLAG_EDMA_EN
;
1516 ata_ehi_push_desc(ehi
, "EDMA self-disable");
1519 if (edma_err_cause
& EDMA_ERR_SERR
) {
1520 sata_scr_read(&ap
->link
, SCR_ERROR
, &serr
);
1521 sata_scr_write_flush(&ap
->link
, SCR_ERROR
, serr
);
1522 err_mask
= AC_ERR_ATA_BUS
;
1523 action
|= ATA_EH_RESET
;
1527 /* Clear EDMA now that SERR cleanup done */
1528 writelfl(~edma_err_cause
, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
1531 err_mask
= AC_ERR_OTHER
;
1532 action
|= ATA_EH_RESET
;
1535 ehi
->serror
|= serr
;
1536 ehi
->action
|= action
;
1539 qc
->err_mask
|= err_mask
;
1541 ehi
->err_mask
|= err_mask
;
1543 if (edma_err_cause
& eh_freeze_mask
)
1544 ata_port_freeze(ap
);
1549 static void mv_intr_pio(struct ata_port
*ap
)
1551 struct ata_queued_cmd
*qc
;
1554 /* ignore spurious intr if drive still BUSY */
1555 ata_status
= readb(ap
->ioaddr
.status_addr
);
1556 if (unlikely(ata_status
& ATA_BUSY
))
1559 /* get active ATA command */
1560 qc
= ata_qc_from_tag(ap
, ap
->link
.active_tag
);
1561 if (unlikely(!qc
)) /* no active tag */
1563 if (qc
->tf
.flags
& ATA_TFLAG_POLLING
) /* polling; we don't own qc */
1566 /* and finally, complete the ATA command */
1567 qc
->err_mask
|= ac_err_mask(ata_status
);
1568 ata_qc_complete(qc
);
1571 static void mv_intr_edma(struct ata_port
*ap
)
1573 void __iomem
*port_mmio
= mv_ap_base(ap
);
1574 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1575 struct mv_port_priv
*pp
= ap
->private_data
;
1576 struct ata_queued_cmd
*qc
;
1577 u32 out_index
, in_index
;
1578 bool work_done
= false;
1580 /* get h/w response queue pointer */
1581 in_index
= (readl(port_mmio
+ EDMA_RSP_Q_IN_PTR_OFS
)
1582 >> EDMA_RSP_Q_PTR_SHIFT
) & MV_MAX_Q_DEPTH_MASK
;
1588 /* get s/w response queue last-read pointer, and compare */
1589 out_index
= pp
->resp_idx
& MV_MAX_Q_DEPTH_MASK
;
1590 if (in_index
== out_index
)
1593 /* 50xx: get active ATA command */
1594 if (IS_GEN_I(hpriv
))
1595 tag
= ap
->link
.active_tag
;
1597 /* Gen II/IIE: get active ATA command via tag, to enable
1598 * support for queueing. this works transparently for
1599 * queued and non-queued modes.
1602 tag
= le16_to_cpu(pp
->crpb
[out_index
].id
) & 0x1f;
1604 qc
= ata_qc_from_tag(ap
, tag
);
1606 /* For non-NCQ mode, the lower 8 bits of status
1607 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1608 * which should be zero if all went well.
1610 status
= le16_to_cpu(pp
->crpb
[out_index
].flags
);
1611 if ((status
& 0xff) && !(pp
->pp_flags
& MV_PP_FLAG_NCQ_EN
)) {
1612 mv_err_intr(ap
, qc
);
1616 /* and finally, complete the ATA command */
1619 ac_err_mask(status
>> CRPB_FLAG_STATUS_SHIFT
);
1620 ata_qc_complete(qc
);
1623 /* advance software response queue pointer, to
1624 * indicate (after the loop completes) to hardware
1625 * that we have consumed a response queue entry.
1632 writelfl((pp
->crpb_dma
& EDMA_RSP_Q_BASE_LO_MASK
) |
1633 (out_index
<< EDMA_RSP_Q_PTR_SHIFT
),
1634 port_mmio
+ EDMA_RSP_Q_OUT_PTR_OFS
);
1638 * mv_host_intr - Handle all interrupts on the given host controller
1639 * @host: host specific structure
1640 * @relevant: port error bits relevant to this host controller
1641 * @hc: which host controller we're to look at
1643 * Read then write clear the HC interrupt status then walk each
1644 * port connected to the HC and see if it needs servicing. Port
1645 * success ints are reported in the HC interrupt status reg, the
1646 * port error ints are reported in the higher level main
1647 * interrupt status register and thus are passed in via the
1648 * 'relevant' argument.
1651 * Inherited from caller.
1653 static void mv_host_intr(struct ata_host
*host
, u32 relevant
, unsigned int hc
)
1655 struct mv_host_priv
*hpriv
= host
->private_data
;
1656 void __iomem
*mmio
= hpriv
->base
;
1657 void __iomem
*hc_mmio
= mv_hc_base(mmio
, hc
);
1659 int port
, port0
, last_port
;
1664 port0
= MV_PORTS_PER_HC
;
1667 last_port
= port0
+ MV_PORTS_PER_HC
;
1669 last_port
= port0
+ hpriv
->n_ports
;
1670 /* we'll need the HC success int register in most cases */
1671 hc_irq_cause
= readl(hc_mmio
+ HC_IRQ_CAUSE_OFS
);
1675 writelfl(~hc_irq_cause
, hc_mmio
+ HC_IRQ_CAUSE_OFS
);
1677 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1678 hc
, relevant
, hc_irq_cause
);
1680 for (port
= port0
; port
< last_port
; port
++) {
1681 struct ata_port
*ap
= host
->ports
[port
];
1682 struct mv_port_priv
*pp
;
1683 int have_err_bits
, hard_port
, shift
;
1685 if ((!ap
) || (ap
->flags
& ATA_FLAG_DISABLED
))
1688 pp
= ap
->private_data
;
1690 shift
= port
<< 1; /* (port * 2) */
1691 if (port
>= MV_PORTS_PER_HC
) {
1692 shift
++; /* skip bit 8 in the HC Main IRQ reg */
1694 have_err_bits
= ((PORT0_ERR
<< shift
) & relevant
);
1696 if (unlikely(have_err_bits
)) {
1697 struct ata_queued_cmd
*qc
;
1699 qc
= ata_qc_from_tag(ap
, ap
->link
.active_tag
);
1700 if (qc
&& (qc
->tf
.flags
& ATA_TFLAG_POLLING
))
1703 mv_err_intr(ap
, qc
);
1707 hard_port
= mv_hardport_from_port(port
); /* range 0..3 */
1709 if (pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
) {
1710 if ((CRPB_DMA_DONE
<< hard_port
) & hc_irq_cause
)
1713 if ((DEV_IRQ
<< hard_port
) & hc_irq_cause
)
1720 static void mv_pci_error(struct ata_host
*host
, void __iomem
*mmio
)
1722 struct mv_host_priv
*hpriv
= host
->private_data
;
1723 struct ata_port
*ap
;
1724 struct ata_queued_cmd
*qc
;
1725 struct ata_eh_info
*ehi
;
1726 unsigned int i
, err_mask
, printed
= 0;
1729 err_cause
= readl(mmio
+ hpriv
->irq_cause_ofs
);
1731 dev_printk(KERN_ERR
, host
->dev
, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1734 DPRINTK("All regs @ PCI error\n");
1735 mv_dump_all_regs(mmio
, -1, to_pci_dev(host
->dev
));
1737 writelfl(0, mmio
+ hpriv
->irq_cause_ofs
);
1739 for (i
= 0; i
< host
->n_ports
; i
++) {
1740 ap
= host
->ports
[i
];
1741 if (!ata_link_offline(&ap
->link
)) {
1742 ehi
= &ap
->link
.eh_info
;
1743 ata_ehi_clear_desc(ehi
);
1745 ata_ehi_push_desc(ehi
,
1746 "PCI err cause 0x%08x", err_cause
);
1747 err_mask
= AC_ERR_HOST_BUS
;
1748 ehi
->action
= ATA_EH_RESET
;
1749 qc
= ata_qc_from_tag(ap
, ap
->link
.active_tag
);
1751 qc
->err_mask
|= err_mask
;
1753 ehi
->err_mask
|= err_mask
;
1755 ata_port_freeze(ap
);
1761 * mv_interrupt - Main interrupt event handler
1763 * @dev_instance: private data; in this case the host structure
1765 * Read the read only register to determine if any host
1766 * controllers have pending interrupts. If so, call lower level
1767 * routine to handle. Also check for PCI errors which are only
1771 * This routine holds the host lock while processing pending
1774 static irqreturn_t
mv_interrupt(int irq
, void *dev_instance
)
1776 struct ata_host
*host
= dev_instance
;
1777 struct mv_host_priv
*hpriv
= host
->private_data
;
1778 unsigned int hc
, handled
= 0, n_hcs
;
1779 void __iomem
*mmio
= hpriv
->base
;
1780 u32 irq_stat
, irq_mask
;
1782 spin_lock(&host
->lock
);
1784 irq_stat
= readl(hpriv
->main_cause_reg_addr
);
1785 irq_mask
= readl(hpriv
->main_mask_reg_addr
);
1787 /* check the cases where we either have nothing pending or have read
1788 * a bogus register value which can indicate HW removal or PCI fault
1790 if (!(irq_stat
& irq_mask
) || (0xffffffffU
== irq_stat
))
1793 n_hcs
= mv_get_hc_count(host
->ports
[0]->flags
);
1795 if (unlikely((irq_stat
& PCI_ERR
) && HAS_PCI(host
))) {
1796 mv_pci_error(host
, mmio
);
1798 goto out_unlock
; /* skip all other HC irq handling */
1801 for (hc
= 0; hc
< n_hcs
; hc
++) {
1802 u32 relevant
= irq_stat
& (HC0_IRQ_PEND
<< (hc
* HC_SHIFT
));
1804 mv_host_intr(host
, relevant
, hc
);
1810 spin_unlock(&host
->lock
);
1812 return IRQ_RETVAL(handled
);
1815 static void __iomem
*mv5_phy_base(void __iomem
*mmio
, unsigned int port
)
1817 void __iomem
*hc_mmio
= mv_hc_base_from_port(mmio
, port
);
1818 unsigned long ofs
= (mv_hardport_from_port(port
) + 1) * 0x100UL
;
1820 return hc_mmio
+ ofs
;
1823 static unsigned int mv5_scr_offset(unsigned int sc_reg_in
)
1827 switch (sc_reg_in
) {
1831 ofs
= sc_reg_in
* sizeof(u32
);
1840 static int mv5_scr_read(struct ata_port
*ap
, unsigned int sc_reg_in
, u32
*val
)
1842 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1843 void __iomem
*mmio
= hpriv
->base
;
1844 void __iomem
*addr
= mv5_phy_base(mmio
, ap
->port_no
);
1845 unsigned int ofs
= mv5_scr_offset(sc_reg_in
);
1847 if (ofs
!= 0xffffffffU
) {
1848 *val
= readl(addr
+ ofs
);
1854 static int mv5_scr_write(struct ata_port
*ap
, unsigned int sc_reg_in
, u32 val
)
1856 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
1857 void __iomem
*mmio
= hpriv
->base
;
1858 void __iomem
*addr
= mv5_phy_base(mmio
, ap
->port_no
);
1859 unsigned int ofs
= mv5_scr_offset(sc_reg_in
);
1861 if (ofs
!= 0xffffffffU
) {
1862 writelfl(val
, addr
+ ofs
);
1868 static void mv5_reset_bus(struct ata_host
*host
, void __iomem
*mmio
)
1870 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
1873 early_5080
= (pdev
->device
== 0x5080) && (pdev
->revision
== 0);
1876 u32 tmp
= readl(mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1878 writel(tmp
, mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1881 mv_reset_pci_bus(host
, mmio
);
1884 static void mv5_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
1886 writel(0x0fcfffff, mmio
+ MV_FLASH_CTL
);
1889 static void mv5_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
1892 void __iomem
*phy_mmio
= mv5_phy_base(mmio
, idx
);
1895 tmp
= readl(phy_mmio
+ MV5_PHY_MODE
);
1897 hpriv
->signal
[idx
].pre
= tmp
& 0x1800; /* bits 12:11 */
1898 hpriv
->signal
[idx
].amps
= tmp
& 0xe0; /* bits 7:5 */
1901 static void mv5_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
1905 writel(0, mmio
+ MV_GPIO_PORT_CTL
);
1907 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1909 tmp
= readl(mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1911 writel(tmp
, mmio
+ MV_PCI_EXP_ROM_BAR_CTL
);
1914 static void mv5_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1917 void __iomem
*phy_mmio
= mv5_phy_base(mmio
, port
);
1918 const u32 mask
= (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1920 int fix_apm_sq
= (hpriv
->hp_flags
& MV_HP_ERRATA_50XXB0
);
1923 tmp
= readl(phy_mmio
+ MV5_LT_MODE
);
1925 writel(tmp
, phy_mmio
+ MV5_LT_MODE
);
1927 tmp
= readl(phy_mmio
+ MV5_PHY_CTL
);
1930 writel(tmp
, phy_mmio
+ MV5_PHY_CTL
);
1933 tmp
= readl(phy_mmio
+ MV5_PHY_MODE
);
1935 tmp
|= hpriv
->signal
[port
].pre
;
1936 tmp
|= hpriv
->signal
[port
].amps
;
1937 writel(tmp
, phy_mmio
+ MV5_PHY_MODE
);
1942 #define ZERO(reg) writel(0, port_mmio + (reg))
1943 static void mv5_reset_hc_port(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1946 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
1948 writelfl(EDMA_DS
, port_mmio
+ EDMA_CMD_OFS
);
1950 mv_channel_reset(hpriv
, mmio
, port
);
1952 ZERO(0x028); /* command */
1953 writel(0x11f, port_mmio
+ EDMA_CFG_OFS
);
1954 ZERO(0x004); /* timer */
1955 ZERO(0x008); /* irq err cause */
1956 ZERO(0x00c); /* irq err mask */
1957 ZERO(0x010); /* rq bah */
1958 ZERO(0x014); /* rq inp */
1959 ZERO(0x018); /* rq outp */
1960 ZERO(0x01c); /* respq bah */
1961 ZERO(0x024); /* respq outp */
1962 ZERO(0x020); /* respq inp */
1963 ZERO(0x02c); /* test control */
1964 writel(0xbc, port_mmio
+ EDMA_IORDY_TMOUT
);
1968 #define ZERO(reg) writel(0, hc_mmio + (reg))
1969 static void mv5_reset_one_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1972 void __iomem
*hc_mmio
= mv_hc_base(mmio
, hc
);
1980 tmp
= readl(hc_mmio
+ 0x20);
1983 writel(tmp
, hc_mmio
+ 0x20);
1987 static int mv5_reset_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
1990 unsigned int hc
, port
;
1992 for (hc
= 0; hc
< n_hc
; hc
++) {
1993 for (port
= 0; port
< MV_PORTS_PER_HC
; port
++)
1994 mv5_reset_hc_port(hpriv
, mmio
,
1995 (hc
* MV_PORTS_PER_HC
) + port
);
1997 mv5_reset_one_hc(hpriv
, mmio
, hc
);
2004 #define ZERO(reg) writel(0, mmio + (reg))
2005 static void mv_reset_pci_bus(struct ata_host
*host
, void __iomem
*mmio
)
2007 struct mv_host_priv
*hpriv
= host
->private_data
;
2010 tmp
= readl(mmio
+ MV_PCI_MODE
);
2012 writel(tmp
, mmio
+ MV_PCI_MODE
);
2014 ZERO(MV_PCI_DISC_TIMER
);
2015 ZERO(MV_PCI_MSI_TRIGGER
);
2016 writel(0x000100ff, mmio
+ MV_PCI_XBAR_TMOUT
);
2017 ZERO(HC_MAIN_IRQ_MASK_OFS
);
2018 ZERO(MV_PCI_SERR_MASK
);
2019 ZERO(hpriv
->irq_cause_ofs
);
2020 ZERO(hpriv
->irq_mask_ofs
);
2021 ZERO(MV_PCI_ERR_LOW_ADDRESS
);
2022 ZERO(MV_PCI_ERR_HIGH_ADDRESS
);
2023 ZERO(MV_PCI_ERR_ATTRIBUTE
);
2024 ZERO(MV_PCI_ERR_COMMAND
);
2028 static void mv6_reset_flash(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
2032 mv5_reset_flash(hpriv
, mmio
);
2034 tmp
= readl(mmio
+ MV_GPIO_PORT_CTL
);
2036 tmp
|= (1 << 5) | (1 << 6);
2037 writel(tmp
, mmio
+ MV_GPIO_PORT_CTL
);
2041 * mv6_reset_hc - Perform the 6xxx global soft reset
2042 * @mmio: base address of the HBA
2044 * This routine only applies to 6xxx parts.
2047 * Inherited from caller.
2049 static int mv6_reset_hc(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
2052 void __iomem
*reg
= mmio
+ PCI_MAIN_CMD_STS_OFS
;
2056 /* Following procedure defined in PCI "main command and status
2060 writel(t
| STOP_PCI_MASTER
, reg
);
2062 for (i
= 0; i
< 1000; i
++) {
2065 if (PCI_MASTER_EMPTY
& t
)
2068 if (!(PCI_MASTER_EMPTY
& t
)) {
2069 printk(KERN_ERR DRV_NAME
": PCI master won't flush\n");
2077 writel(t
| GLOB_SFT_RST
, reg
);
2080 } while (!(GLOB_SFT_RST
& t
) && (i
-- > 0));
2082 if (!(GLOB_SFT_RST
& t
)) {
2083 printk(KERN_ERR DRV_NAME
": can't set global reset\n");
2088 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2091 writel(t
& ~(GLOB_SFT_RST
| STOP_PCI_MASTER
), reg
);
2094 } while ((GLOB_SFT_RST
& t
) && (i
-- > 0));
2096 if (GLOB_SFT_RST
& t
) {
2097 printk(KERN_ERR DRV_NAME
": can't clear global reset\n");
2104 static void mv6_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
2107 void __iomem
*port_mmio
;
2110 tmp
= readl(mmio
+ MV_RESET_CFG
);
2111 if ((tmp
& (1 << 0)) == 0) {
2112 hpriv
->signal
[idx
].amps
= 0x7 << 8;
2113 hpriv
->signal
[idx
].pre
= 0x1 << 5;
2117 port_mmio
= mv_port_base(mmio
, idx
);
2118 tmp
= readl(port_mmio
+ PHY_MODE2
);
2120 hpriv
->signal
[idx
].amps
= tmp
& 0x700; /* bits 10:8 */
2121 hpriv
->signal
[idx
].pre
= tmp
& 0xe0; /* bits 7:5 */
2124 static void mv6_enable_leds(struct mv_host_priv
*hpriv
, void __iomem
*mmio
)
2126 writel(0x00000060, mmio
+ MV_GPIO_PORT_CTL
);
2129 static void mv6_phy_errata(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
2132 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
2134 u32 hp_flags
= hpriv
->hp_flags
;
2136 hp_flags
& (MV_HP_ERRATA_60X1B2
| MV_HP_ERRATA_60X1C0
);
2138 hp_flags
& (MV_HP_ERRATA_60X1B2
| MV_HP_ERRATA_60X1C0
);
2141 if (fix_phy_mode2
) {
2142 m2
= readl(port_mmio
+ PHY_MODE2
);
2145 writel(m2
, port_mmio
+ PHY_MODE2
);
2149 m2
= readl(port_mmio
+ PHY_MODE2
);
2150 m2
&= ~((1 << 16) | (1 << 31));
2151 writel(m2
, port_mmio
+ PHY_MODE2
);
2156 /* who knows what this magic does */
2157 tmp
= readl(port_mmio
+ PHY_MODE3
);
2160 writel(tmp
, port_mmio
+ PHY_MODE3
);
2162 if (fix_phy_mode4
) {
2165 m4
= readl(port_mmio
+ PHY_MODE4
);
2167 if (hp_flags
& MV_HP_ERRATA_60X1B2
)
2168 tmp
= readl(port_mmio
+ 0x310);
2170 m4
= (m4
& ~(1 << 1)) | (1 << 0);
2172 writel(m4
, port_mmio
+ PHY_MODE4
);
2174 if (hp_flags
& MV_HP_ERRATA_60X1B2
)
2175 writel(tmp
, port_mmio
+ 0x310);
2178 /* Revert values of pre-emphasis and signal amps to the saved ones */
2179 m2
= readl(port_mmio
+ PHY_MODE2
);
2181 m2
&= ~MV_M2_PREAMP_MASK
;
2182 m2
|= hpriv
->signal
[port
].amps
;
2183 m2
|= hpriv
->signal
[port
].pre
;
2186 /* according to mvSata 3.6.1, some IIE values are fixed */
2187 if (IS_GEN_IIE(hpriv
)) {
2192 writel(m2
, port_mmio
+ PHY_MODE2
);
2195 /* TODO: use the generic LED interface to configure the SATA Presence */
2196 /* & Acitivy LEDs on the board */
2197 static void mv_soc_enable_leds(struct mv_host_priv
*hpriv
,
2203 static void mv_soc_read_preamp(struct mv_host_priv
*hpriv
, int idx
,
2206 void __iomem
*port_mmio
;
2209 port_mmio
= mv_port_base(mmio
, idx
);
2210 tmp
= readl(port_mmio
+ PHY_MODE2
);
2212 hpriv
->signal
[idx
].amps
= tmp
& 0x700; /* bits 10:8 */
2213 hpriv
->signal
[idx
].pre
= tmp
& 0xe0; /* bits 7:5 */
2217 #define ZERO(reg) writel(0, port_mmio + (reg))
2218 static void mv_soc_reset_hc_port(struct mv_host_priv
*hpriv
,
2219 void __iomem
*mmio
, unsigned int port
)
2221 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
2223 writelfl(EDMA_DS
, port_mmio
+ EDMA_CMD_OFS
);
2225 mv_channel_reset(hpriv
, mmio
, port
);
2227 ZERO(0x028); /* command */
2228 writel(0x101f, port_mmio
+ EDMA_CFG_OFS
);
2229 ZERO(0x004); /* timer */
2230 ZERO(0x008); /* irq err cause */
2231 ZERO(0x00c); /* irq err mask */
2232 ZERO(0x010); /* rq bah */
2233 ZERO(0x014); /* rq inp */
2234 ZERO(0x018); /* rq outp */
2235 ZERO(0x01c); /* respq bah */
2236 ZERO(0x024); /* respq outp */
2237 ZERO(0x020); /* respq inp */
2238 ZERO(0x02c); /* test control */
2239 writel(0xbc, port_mmio
+ EDMA_IORDY_TMOUT
);
2244 #define ZERO(reg) writel(0, hc_mmio + (reg))
2245 static void mv_soc_reset_one_hc(struct mv_host_priv
*hpriv
,
2248 void __iomem
*hc_mmio
= mv_hc_base(mmio
, 0);
2258 static int mv_soc_reset_hc(struct mv_host_priv
*hpriv
,
2259 void __iomem
*mmio
, unsigned int n_hc
)
2263 for (port
= 0; port
< hpriv
->n_ports
; port
++)
2264 mv_soc_reset_hc_port(hpriv
, mmio
, port
);
2266 mv_soc_reset_one_hc(hpriv
, mmio
);
2271 static void mv_soc_reset_flash(struct mv_host_priv
*hpriv
,
2277 static void mv_soc_reset_bus(struct ata_host
*host
, void __iomem
*mmio
)
2282 static void mv_channel_reset(struct mv_host_priv
*hpriv
, void __iomem
*mmio
,
2283 unsigned int port_no
)
2285 void __iomem
*port_mmio
= mv_port_base(mmio
, port_no
);
2287 writelfl(ATA_RST
, port_mmio
+ EDMA_CMD_OFS
);
2289 if (IS_GEN_II(hpriv
)) {
2290 u32 ifctl
= readl(port_mmio
+ SATA_INTERFACE_CTL
);
2291 ifctl
|= (1 << 7); /* enable gen2i speed */
2292 ifctl
= (ifctl
& 0xfff) | 0x9b1000; /* from chip spec */
2293 writelfl(ifctl
, port_mmio
+ SATA_INTERFACE_CTL
);
2296 udelay(25); /* allow reset propagation */
2298 /* Spec never mentions clearing the bit. Marvell's driver does
2299 * clear the bit, however.
2301 writelfl(0, port_mmio
+ EDMA_CMD_OFS
);
2303 hpriv
->ops
->phy_errata(hpriv
, mmio
, port_no
);
2305 if (IS_GEN_I(hpriv
))
2310 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2311 * @ap: ATA channel to manipulate
2313 * Part of this is taken from __sata_phy_reset and modified to
2314 * not sleep since this routine gets called from interrupt level.
2317 * Inherited from caller. This is coded to safe to call at
2318 * interrupt level, i.e. it does not sleep.
2320 static void mv_phy_reset(struct ata_port
*ap
, unsigned int *class,
2321 unsigned long deadline
)
2323 struct mv_port_priv
*pp
= ap
->private_data
;
2324 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
2325 void __iomem
*port_mmio
= mv_ap_base(ap
);
2329 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap
->port_no
, port_mmio
);
2333 u32 sstatus
, serror
, scontrol
;
2335 mv_scr_read(ap
, SCR_STATUS
, &sstatus
);
2336 mv_scr_read(ap
, SCR_ERROR
, &serror
);
2337 mv_scr_read(ap
, SCR_CONTROL
, &scontrol
);
2338 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2339 "SCtrl 0x%08x\n", sstatus
, serror
, scontrol
);
2343 /* Issue COMRESET via SControl */
2345 sata_scr_write_flush(&ap
->link
, SCR_CONTROL
, 0x301);
2348 sata_scr_write_flush(&ap
->link
, SCR_CONTROL
, 0x300);
2352 sata_scr_read(&ap
->link
, SCR_STATUS
, &sstatus
);
2353 if (((sstatus
& 0x3) == 3) || ((sstatus
& 0x3) == 0))
2357 } while (time_before(jiffies
, deadline
));
2359 /* work around errata */
2360 if (IS_GEN_II(hpriv
) &&
2361 (sstatus
!= 0x0) && (sstatus
!= 0x113) && (sstatus
!= 0x123) &&
2363 goto comreset_retry
;
2367 u32 sstatus
, serror
, scontrol
;
2369 mv_scr_read(ap
, SCR_STATUS
, &sstatus
);
2370 mv_scr_read(ap
, SCR_ERROR
, &serror
);
2371 mv_scr_read(ap
, SCR_CONTROL
, &scontrol
);
2372 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2373 "SCtrl 0x%08x\n", sstatus
, serror
, scontrol
);
2377 if (ata_link_offline(&ap
->link
)) {
2378 *class = ATA_DEV_NONE
;
2382 /* even after SStatus reflects that device is ready,
2383 * it seems to take a while for link to be fully
2384 * established (and thus Status no longer 0x80/0x7F),
2385 * so we poll a bit for that, here.
2389 u8 drv_stat
= ata_check_status(ap
);
2390 if ((drv_stat
!= 0x80) && (drv_stat
!= 0x7f))
2395 if (time_after(jiffies
, deadline
))
2399 /* FIXME: if we passed the deadline, the following
2400 * code probably produces an invalid result
2403 /* finally, read device signature from TF registers */
2404 *class = ata_dev_try_classify(ap
->link
.device
, 1, NULL
);
2406 writelfl(0, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
2408 WARN_ON(pp
->pp_flags
& MV_PP_FLAG_EDMA_EN
);
2413 static int mv_prereset(struct ata_link
*link
, unsigned long deadline
)
2415 struct ata_port
*ap
= link
->ap
;
2416 struct mv_port_priv
*pp
= ap
->private_data
;
2420 if (!(pp
->pp_flags
& MV_PP_FLAG_HAD_A_RESET
))
2421 pp
->pp_flags
|= MV_PP_FLAG_HAD_A_RESET
;
2426 static int mv_hardreset(struct ata_link
*link
, unsigned int *class,
2427 unsigned long deadline
)
2429 struct ata_port
*ap
= link
->ap
;
2430 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
2431 void __iomem
*mmio
= hpriv
->base
;
2435 mv_channel_reset(hpriv
, mmio
, ap
->port_no
);
2437 mv_phy_reset(ap
, class, deadline
);
2442 static void mv_postreset(struct ata_link
*link
, unsigned int *classes
)
2444 struct ata_port
*ap
= link
->ap
;
2447 /* print link status */
2448 sata_print_link_status(link
);
2451 sata_scr_read(link
, SCR_ERROR
, &serr
);
2452 sata_scr_write_flush(link
, SCR_ERROR
, serr
);
2454 /* bail out if no device is present */
2455 if (classes
[0] == ATA_DEV_NONE
&& classes
[1] == ATA_DEV_NONE
) {
2456 DPRINTK("EXIT, no device\n");
2460 /* set up device control */
2461 iowrite8(ap
->ctl
, ap
->ioaddr
.ctl_addr
);
2464 static void mv_error_handler(struct ata_port
*ap
)
2466 ata_do_eh(ap
, mv_prereset
, ata_std_softreset
,
2467 mv_hardreset
, mv_postreset
);
2470 static void mv_eh_freeze(struct ata_port
*ap
)
2472 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
2473 unsigned int hc
= (ap
->port_no
> 3) ? 1 : 0;
2477 /* FIXME: handle coalescing completion events properly */
2479 shift
= ap
->port_no
* 2;
2483 mask
= 0x3 << shift
;
2485 /* disable assertion of portN err, done events */
2486 tmp
= readl(hpriv
->main_mask_reg_addr
);
2487 writelfl(tmp
& ~mask
, hpriv
->main_mask_reg_addr
);
2490 static void mv_eh_thaw(struct ata_port
*ap
)
2492 struct mv_host_priv
*hpriv
= ap
->host
->private_data
;
2493 void __iomem
*mmio
= hpriv
->base
;
2494 unsigned int hc
= (ap
->port_no
> 3) ? 1 : 0;
2495 void __iomem
*hc_mmio
= mv_hc_base(mmio
, hc
);
2496 void __iomem
*port_mmio
= mv_ap_base(ap
);
2497 u32 tmp
, mask
, hc_irq_cause
;
2498 unsigned int shift
, hc_port_no
= ap
->port_no
;
2500 /* FIXME: handle coalescing completion events properly */
2502 shift
= ap
->port_no
* 2;
2508 mask
= 0x3 << shift
;
2510 /* clear EDMA errors on this port */
2511 writel(0, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
2513 /* clear pending irq events */
2514 hc_irq_cause
= readl(hc_mmio
+ HC_IRQ_CAUSE_OFS
);
2515 hc_irq_cause
&= ~(1 << hc_port_no
); /* clear CRPB-done */
2516 hc_irq_cause
&= ~(1 << (hc_port_no
+ 8)); /* clear Device int */
2517 writel(hc_irq_cause
, hc_mmio
+ HC_IRQ_CAUSE_OFS
);
2519 /* enable assertion of portN err, done events */
2520 tmp
= readl(hpriv
->main_mask_reg_addr
);
2521 writelfl(tmp
| mask
, hpriv
->main_mask_reg_addr
);
2525 * mv_port_init - Perform some early initialization on a single port.
2526 * @port: libata data structure storing shadow register addresses
2527 * @port_mmio: base address of the port
2529 * Initialize shadow register mmio addresses, clear outstanding
2530 * interrupts on the port, and unmask interrupts for the future
2531 * start of the port.
2534 * Inherited from caller.
2536 static void mv_port_init(struct ata_ioports
*port
, void __iomem
*port_mmio
)
2538 void __iomem
*shd_base
= port_mmio
+ SHD_BLK_OFS
;
2541 /* PIO related setup
2543 port
->data_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_DATA
);
2545 port
->feature_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_ERR
);
2546 port
->nsect_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_NSECT
);
2547 port
->lbal_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_LBAL
);
2548 port
->lbam_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_LBAM
);
2549 port
->lbah_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_LBAH
);
2550 port
->device_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_DEVICE
);
2552 port
->command_addr
= shd_base
+ (sizeof(u32
) * ATA_REG_STATUS
);
2553 /* special case: control/altstatus doesn't have ATA_REG_ address */
2554 port
->altstatus_addr
= port
->ctl_addr
= shd_base
+ SHD_CTL_AST_OFS
;
2557 port
->cmd_addr
= port
->bmdma_addr
= port
->scr_addr
= NULL
;
2559 /* Clear any currently outstanding port interrupt conditions */
2560 serr_ofs
= mv_scr_offset(SCR_ERROR
);
2561 writelfl(readl(port_mmio
+ serr_ofs
), port_mmio
+ serr_ofs
);
2562 writelfl(0, port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
);
2564 /* unmask all non-transient EDMA error interrupts */
2565 writelfl(~EDMA_ERR_IRQ_TRANSIENT
, port_mmio
+ EDMA_ERR_IRQ_MASK_OFS
);
2567 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2568 readl(port_mmio
+ EDMA_CFG_OFS
),
2569 readl(port_mmio
+ EDMA_ERR_IRQ_CAUSE_OFS
),
2570 readl(port_mmio
+ EDMA_ERR_IRQ_MASK_OFS
));
2573 static int mv_chip_id(struct ata_host
*host
, unsigned int board_idx
)
2575 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
2576 struct mv_host_priv
*hpriv
= host
->private_data
;
2577 u32 hp_flags
= hpriv
->hp_flags
;
2579 switch (board_idx
) {
2581 hpriv
->ops
= &mv5xxx_ops
;
2582 hp_flags
|= MV_HP_GEN_I
;
2584 switch (pdev
->revision
) {
2586 hp_flags
|= MV_HP_ERRATA_50XXB0
;
2589 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2592 dev_printk(KERN_WARNING
, &pdev
->dev
,
2593 "Applying 50XXB2 workarounds to unknown rev\n");
2594 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2601 hpriv
->ops
= &mv5xxx_ops
;
2602 hp_flags
|= MV_HP_GEN_I
;
2604 switch (pdev
->revision
) {
2606 hp_flags
|= MV_HP_ERRATA_50XXB0
;
2609 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2612 dev_printk(KERN_WARNING
, &pdev
->dev
,
2613 "Applying B2 workarounds to unknown rev\n");
2614 hp_flags
|= MV_HP_ERRATA_50XXB2
;
2621 hpriv
->ops
= &mv6xxx_ops
;
2622 hp_flags
|= MV_HP_GEN_II
;
2624 switch (pdev
->revision
) {
2626 hp_flags
|= MV_HP_ERRATA_60X1B2
;
2629 hp_flags
|= MV_HP_ERRATA_60X1C0
;
2632 dev_printk(KERN_WARNING
, &pdev
->dev
,
2633 "Applying B2 workarounds to unknown rev\n");
2634 hp_flags
|= MV_HP_ERRATA_60X1B2
;
2640 hp_flags
|= MV_HP_PCIE
;
2641 if (pdev
->vendor
== PCI_VENDOR_ID_TTI
&&
2642 (pdev
->device
== 0x2300 || pdev
->device
== 0x2310))
2645 * Highpoint RocketRAID PCIe 23xx series cards:
2647 * Unconfigured drives are treated as "Legacy"
2648 * by the BIOS, and it overwrites sector 8 with
2649 * a "Lgcy" metadata block prior to Linux boot.
2651 * Configured drives (RAID or JBOD) leave sector 8
2652 * alone, but instead overwrite a high numbered
2653 * sector for the RAID metadata. This sector can
2654 * be determined exactly, by truncating the physical
2655 * drive capacity to a nice even GB value.
2657 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2659 * Warn the user, lest they think we're just buggy.
2661 printk(KERN_WARNING DRV_NAME
": Highpoint RocketRAID"
2662 " BIOS CORRUPTS DATA on all attached drives,"
2663 " regardless of if/how they are configured."
2665 printk(KERN_WARNING DRV_NAME
": For data safety, do not"
2666 " use sectors 8-9 on \"Legacy\" drives,"
2667 " and avoid the final two gigabytes on"
2668 " all RocketRAID BIOS initialized drives.\n");
2671 hpriv
->ops
= &mv6xxx_ops
;
2672 hp_flags
|= MV_HP_GEN_IIE
;
2674 switch (pdev
->revision
) {
2676 hp_flags
|= MV_HP_ERRATA_XX42A0
;
2679 hp_flags
|= MV_HP_ERRATA_60X1C0
;
2682 dev_printk(KERN_WARNING
, &pdev
->dev
,
2683 "Applying 60X1C0 workarounds to unknown rev\n");
2684 hp_flags
|= MV_HP_ERRATA_60X1C0
;
2689 hpriv
->ops
= &mv_soc_ops
;
2690 hp_flags
|= MV_HP_ERRATA_60X1C0
;
2694 dev_printk(KERN_ERR
, host
->dev
,
2695 "BUG: invalid board index %u\n", board_idx
);
2699 hpriv
->hp_flags
= hp_flags
;
2700 if (hp_flags
& MV_HP_PCIE
) {
2701 hpriv
->irq_cause_ofs
= PCIE_IRQ_CAUSE_OFS
;
2702 hpriv
->irq_mask_ofs
= PCIE_IRQ_MASK_OFS
;
2703 hpriv
->unmask_all_irqs
= PCIE_UNMASK_ALL_IRQS
;
2705 hpriv
->irq_cause_ofs
= PCI_IRQ_CAUSE_OFS
;
2706 hpriv
->irq_mask_ofs
= PCI_IRQ_MASK_OFS
;
2707 hpriv
->unmask_all_irqs
= PCI_UNMASK_ALL_IRQS
;
2714 * mv_init_host - Perform some early initialization of the host.
2715 * @host: ATA host to initialize
2716 * @board_idx: controller index
2718 * If possible, do an early global reset of the host. Then do
2719 * our port init and clear/unmask all/relevant host interrupts.
2722 * Inherited from caller.
2724 static int mv_init_host(struct ata_host
*host
, unsigned int board_idx
)
2726 int rc
= 0, n_hc
, port
, hc
;
2727 struct mv_host_priv
*hpriv
= host
->private_data
;
2728 void __iomem
*mmio
= hpriv
->base
;
2730 rc
= mv_chip_id(host
, board_idx
);
2734 if (HAS_PCI(host
)) {
2735 hpriv
->main_cause_reg_addr
= hpriv
->base
+
2736 HC_MAIN_IRQ_CAUSE_OFS
;
2737 hpriv
->main_mask_reg_addr
= hpriv
->base
+ HC_MAIN_IRQ_MASK_OFS
;
2739 hpriv
->main_cause_reg_addr
= hpriv
->base
+
2740 HC_SOC_MAIN_IRQ_CAUSE_OFS
;
2741 hpriv
->main_mask_reg_addr
= hpriv
->base
+
2742 HC_SOC_MAIN_IRQ_MASK_OFS
;
2744 /* global interrupt mask */
2745 writel(0, hpriv
->main_mask_reg_addr
);
2747 n_hc
= mv_get_hc_count(host
->ports
[0]->flags
);
2749 for (port
= 0; port
< host
->n_ports
; port
++)
2750 hpriv
->ops
->read_preamp(hpriv
, port
, mmio
);
2752 rc
= hpriv
->ops
->reset_hc(hpriv
, mmio
, n_hc
);
2756 hpriv
->ops
->reset_flash(hpriv
, mmio
);
2757 hpriv
->ops
->reset_bus(host
, mmio
);
2758 hpriv
->ops
->enable_leds(hpriv
, mmio
);
2760 for (port
= 0; port
< host
->n_ports
; port
++) {
2761 if (IS_GEN_II(hpriv
)) {
2762 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
2764 u32 ifctl
= readl(port_mmio
+ SATA_INTERFACE_CTL
);
2765 ifctl
|= (1 << 7); /* enable gen2i speed */
2766 ifctl
= (ifctl
& 0xfff) | 0x9b1000; /* from chip spec */
2767 writelfl(ifctl
, port_mmio
+ SATA_INTERFACE_CTL
);
2770 hpriv
->ops
->phy_errata(hpriv
, mmio
, port
);
2773 for (port
= 0; port
< host
->n_ports
; port
++) {
2774 struct ata_port
*ap
= host
->ports
[port
];
2775 void __iomem
*port_mmio
= mv_port_base(mmio
, port
);
2777 mv_port_init(&ap
->ioaddr
, port_mmio
);
2780 if (HAS_PCI(host
)) {
2781 unsigned int offset
= port_mmio
- mmio
;
2782 ata_port_pbar_desc(ap
, MV_PRIMARY_BAR
, -1, "mmio");
2783 ata_port_pbar_desc(ap
, MV_PRIMARY_BAR
, offset
, "port");
2788 for (hc
= 0; hc
< n_hc
; hc
++) {
2789 void __iomem
*hc_mmio
= mv_hc_base(mmio
, hc
);
2791 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2792 "(before clear)=0x%08x\n", hc
,
2793 readl(hc_mmio
+ HC_CFG_OFS
),
2794 readl(hc_mmio
+ HC_IRQ_CAUSE_OFS
));
2796 /* Clear any currently outstanding hc interrupt conditions */
2797 writelfl(0, hc_mmio
+ HC_IRQ_CAUSE_OFS
);
2800 if (HAS_PCI(host
)) {
2801 /* Clear any currently outstanding host interrupt conditions */
2802 writelfl(0, mmio
+ hpriv
->irq_cause_ofs
);
2804 /* and unmask interrupt generation for host regs */
2805 writelfl(hpriv
->unmask_all_irqs
, mmio
+ hpriv
->irq_mask_ofs
);
2806 if (IS_GEN_I(hpriv
))
2807 writelfl(~HC_MAIN_MASKED_IRQS_5
,
2808 hpriv
->main_mask_reg_addr
);
2810 writelfl(~HC_MAIN_MASKED_IRQS
,
2811 hpriv
->main_mask_reg_addr
);
2813 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2814 "PCI int cause/mask=0x%08x/0x%08x\n",
2815 readl(hpriv
->main_cause_reg_addr
),
2816 readl(hpriv
->main_mask_reg_addr
),
2817 readl(mmio
+ hpriv
->irq_cause_ofs
),
2818 readl(mmio
+ hpriv
->irq_mask_ofs
));
2820 writelfl(~HC_MAIN_MASKED_IRQS_SOC
,
2821 hpriv
->main_mask_reg_addr
);
2822 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2823 readl(hpriv
->main_cause_reg_addr
),
2824 readl(hpriv
->main_mask_reg_addr
));
2830 static int mv_create_dma_pools(struct mv_host_priv
*hpriv
, struct device
*dev
)
2832 hpriv
->crqb_pool
= dmam_pool_create("crqb_q", dev
, MV_CRQB_Q_SZ
,
2834 if (!hpriv
->crqb_pool
)
2837 hpriv
->crpb_pool
= dmam_pool_create("crpb_q", dev
, MV_CRPB_Q_SZ
,
2839 if (!hpriv
->crpb_pool
)
2842 hpriv
->sg_tbl_pool
= dmam_pool_create("sg_tbl", dev
, MV_SG_TBL_SZ
,
2844 if (!hpriv
->sg_tbl_pool
)
2851 * mv_platform_probe - handle a positive probe of an soc Marvell
2853 * @pdev: platform device found
2856 * Inherited from caller.
2858 static int mv_platform_probe(struct platform_device
*pdev
)
2860 static int printed_version
;
2861 const struct mv_sata_platform_data
*mv_platform_data
;
2862 const struct ata_port_info
*ppi
[] =
2863 { &mv_port_info
[chip_soc
], NULL
};
2864 struct ata_host
*host
;
2865 struct mv_host_priv
*hpriv
;
2866 struct resource
*res
;
2869 if (!printed_version
++)
2870 dev_printk(KERN_INFO
, &pdev
->dev
, "version " DRV_VERSION
"\n");
2873 * Simple resource validation ..
2875 if (unlikely(pdev
->num_resources
!= 2)) {
2876 dev_err(&pdev
->dev
, "invalid number of resources\n");
2881 * Get the register base first
2883 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
2888 mv_platform_data
= pdev
->dev
.platform_data
;
2889 n_ports
= mv_platform_data
->n_ports
;
2891 host
= ata_host_alloc_pinfo(&pdev
->dev
, ppi
, n_ports
);
2892 hpriv
= devm_kzalloc(&pdev
->dev
, sizeof(*hpriv
), GFP_KERNEL
);
2894 if (!host
|| !hpriv
)
2896 host
->private_data
= hpriv
;
2897 hpriv
->n_ports
= n_ports
;
2900 hpriv
->base
= devm_ioremap(&pdev
->dev
, res
->start
,
2901 res
->end
- res
->start
+ 1);
2902 hpriv
->base
-= MV_SATAHC0_REG_BASE
;
2904 rc
= mv_create_dma_pools(hpriv
, &pdev
->dev
);
2908 /* initialize adapter */
2909 rc
= mv_init_host(host
, chip_soc
);
2913 dev_printk(KERN_INFO
, &pdev
->dev
,
2914 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH
,
2917 return ata_host_activate(host
, platform_get_irq(pdev
, 0), mv_interrupt
,
2918 IRQF_SHARED
, &mv6_sht
);
2923 * mv_platform_remove - unplug a platform interface
2924 * @pdev: platform device
2926 * A platform bus SATA device has been unplugged. Perform the needed
2927 * cleanup. Also called on module unload for any active devices.
2929 static int __devexit
mv_platform_remove(struct platform_device
*pdev
)
2931 struct device
*dev
= &pdev
->dev
;
2932 struct ata_host
*host
= dev_get_drvdata(dev
);
2934 ata_host_detach(host
);
2938 static struct platform_driver mv_platform_driver
= {
2939 .probe
= mv_platform_probe
,
2940 .remove
= __devexit_p(mv_platform_remove
),
2943 .owner
= THIS_MODULE
,
/* Forward declaration: PCI probe is defined further down. */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);
2953 static struct pci_driver mv_pci_driver
= {
2955 .id_table
= mv_pci_tbl
,
2956 .probe
= mv_pci_init_one
,
2957 .remove
= ata_pci_remove_one
,
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2966 /* move to PCI layer or libata core? */
2967 static int pci_go_64(struct pci_dev
*pdev
)
2971 if (!pci_set_dma_mask(pdev
, DMA_64BIT_MASK
)) {
2972 rc
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
2974 rc
= pci_set_consistent_dma_mask(pdev
, DMA_32BIT_MASK
);
2976 dev_printk(KERN_ERR
, &pdev
->dev
,
2977 "64-bit DMA enable failed\n");
2982 rc
= pci_set_dma_mask(pdev
, DMA_32BIT_MASK
);
2984 dev_printk(KERN_ERR
, &pdev
->dev
,
2985 "32-bit DMA enable failed\n");
2988 rc
= pci_set_consistent_dma_mask(pdev
, DMA_32BIT_MASK
);
2990 dev_printk(KERN_ERR
, &pdev
->dev
,
2991 "32-bit consistent DMA enable failed\n");
3000 * mv_print_info - Dump key info to kernel log for perusal.
3001 * @host: ATA host to print info about
3003 * FIXME: complete this.
3006 * Inherited from caller.
3008 static void mv_print_info(struct ata_host
*host
)
3010 struct pci_dev
*pdev
= to_pci_dev(host
->dev
);
3011 struct mv_host_priv
*hpriv
= host
->private_data
;
3013 const char *scc_s
, *gen
;
3015 /* Use this to determine the HW stepping of the chip so we know
3016 * what errata to workaround
3018 pci_read_config_byte(pdev
, PCI_CLASS_DEVICE
, &scc
);
3021 else if (scc
== 0x01)
3026 if (IS_GEN_I(hpriv
))
3028 else if (IS_GEN_II(hpriv
))
3030 else if (IS_GEN_IIE(hpriv
))
3035 dev_printk(KERN_INFO
, &pdev
->dev
,
3036 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3037 gen
, (unsigned)MV_MAX_Q_DEPTH
, host
->n_ports
,
3038 scc_s
, (MV_HP_FLAG_MSI
& hpriv
->hp_flags
) ? "MSI" : "INTx");
3042 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
3043 * @pdev: PCI device found
3044 * @ent: PCI device ID entry for the matched host
3047 * Inherited from caller.
3049 static int mv_pci_init_one(struct pci_dev
*pdev
,
3050 const struct pci_device_id
*ent
)
3052 static int printed_version
;
3053 unsigned int board_idx
= (unsigned int)ent
->driver_data
;
3054 const struct ata_port_info
*ppi
[] = { &mv_port_info
[board_idx
], NULL
};
3055 struct ata_host
*host
;
3056 struct mv_host_priv
*hpriv
;
3059 if (!printed_version
++)
3060 dev_printk(KERN_INFO
, &pdev
->dev
, "version " DRV_VERSION
"\n");
3063 n_ports
= mv_get_hc_count(ppi
[0]->flags
) * MV_PORTS_PER_HC
;
3065 host
= ata_host_alloc_pinfo(&pdev
->dev
, ppi
, n_ports
);
3066 hpriv
= devm_kzalloc(&pdev
->dev
, sizeof(*hpriv
), GFP_KERNEL
);
3067 if (!host
|| !hpriv
)
3069 host
->private_data
= hpriv
;
3070 hpriv
->n_ports
= n_ports
;
3072 /* acquire resources */
3073 rc
= pcim_enable_device(pdev
);
3077 rc
= pcim_iomap_regions(pdev
, 1 << MV_PRIMARY_BAR
, DRV_NAME
);
3079 pcim_pin_device(pdev
);
3082 host
->iomap
= pcim_iomap_table(pdev
);
3083 hpriv
->base
= host
->iomap
[MV_PRIMARY_BAR
];
3085 rc
= pci_go_64(pdev
);
3089 rc
= mv_create_dma_pools(hpriv
, &pdev
->dev
);
3093 /* initialize adapter */
3094 rc
= mv_init_host(host
, board_idx
);
3098 /* Enable interrupts */
3099 if (msi
&& pci_enable_msi(pdev
))
3102 mv_dump_pci_cfg(pdev
, 0x68);
3103 mv_print_info(host
);
3105 pci_set_master(pdev
);
3106 pci_try_set_mwi(pdev
);
3107 return ata_host_activate(host
, pdev
->irq
, mv_interrupt
, IRQF_SHARED
,
3108 IS_GEN_I(hpriv
) ? &mv5_sht
: &mv6_sht
);
3112 static int mv_platform_probe(struct platform_device
*pdev
);
3113 static int __devexit
mv_platform_remove(struct platform_device
*pdev
);
3115 static int __init
mv_init(void)
3119 rc
= pci_register_driver(&mv_pci_driver
);
3123 rc
= platform_driver_register(&mv_platform_driver
);
3127 pci_unregister_driver(&mv_pci_driver
);
3132 static void __exit
mv_exit(void)
3135 pci_unregister_driver(&mv_pci_driver
);
3137 platform_driver_unregister(&mv_platform_driver
);
3140 MODULE_AUTHOR("Brett Russ");
3141 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3142 MODULE_LICENSE("GPL");
3143 MODULE_DEVICE_TABLE(pci
, mv_pci_tbl
);
3144 MODULE_VERSION(DRV_VERSION
);
3145 MODULE_ALIAS("platform:sata_mv");
3148 module_param(msi
, int, 0444);
3149 MODULE_PARM_DESC(msi
, "Enable use of PCI MSI (0=off, 1=on)");
3152 module_init(mv_init
);
3153 module_exit(mv_exit
);