/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.6"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,			/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),		/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),		/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),		/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};

#define IS_50XX(hpriv)		((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)		(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
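/* Sanity check on the sizes above: 32*32 (CRQB) + 8*32 (CRPB) +
 * 16*176 (ePRD) = 1024 + 256 + 2816 = 4096 bytes, i.e. exactly the
 * 4KB MV_PORT_PRIV_DMA_SZ promised in the comment above.
 */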
/* Command ReQuest Block: 32B */
struct mv_crqb {
	u32			sg_addr;
	u32			sg_addr_hi;
	u16			ctrl_flags;
	u16			ata_cmd[11];
};

struct mv_crqb_iie {
	u32			addr;
	u32			addr_hi;
	u32			flags;
	u32			len;
	u32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	u16			id;
	u16			flags;
	u32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	u32			addr;
	u32			flags_size;
	u32			addr_hi;
	u32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned		req_producer;		/* cp of req_in_ptr */
	unsigned		rsp_consumer;		/* cp of rsp_out_ptr */
	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv;
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
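/* Size check for the blocks above: a mv_crqb is 4+4+2+(2*11) = 32 bytes and
 * a mv_crpb is 2+2+4 = 8 bytes, matching the "32B"/"8B" comments and the
 * MV_CRQB_Q_SZ/MV_CRPB_Q_SZ arithmetic in the enum.
 */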
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static void mv_host_stop(struct ata_host_set *host_set);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_timed_out		= ata_scsi_timed_out,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},

	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},
	{}			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
}

static inline int mv_get_hc_count(unsigned long host_flags)
{
	return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
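/* Worked example of the address math above: port 5 is hard port 1 (5 & 3)
 * on HC 1 (5 >> 2), so its register base is
 * 0x20000 + 1*0x10000 + 0x2000 (arbiter) + 1*0x2000 = 0x34000.
 */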
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.   The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg)) {
			break;
		}
		udelay(100);
	}

	if (EDMA_EN & reg) {
		printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
		/* FIXME: Consider doing a reset here to recover */
	}
}
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ",readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev,b,&dw);
			printk("%08x ",dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n",p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n",p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);
	} else {
		return (u32) ofs;
	}
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);
	}
}
/**
 *      mv_host_stop - Host specific cleanup/stop routine.
 *      @host_set: host data structure
 *
 *      Disable ints, cleanup host memory, call general purpose cleanup
 *      routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_stop(struct ata_host_set *host_set)
{
	struct mv_host_priv *hpriv = host_set->private_data;
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
	kfree(hpriv);
	ata_host_stop(host_set);
}

static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
{
	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crpb, pp->crpb_dma);
}
static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~0x1f;		/* clear queue depth */
	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* dis RX PM port mask */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab host q cache */
		cfg |= (1 << 22);	/* enab cutthrough */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc = -ENOMEM;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		goto err_out;
	memset(pp, 0, sizeof(*pp));

	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				 GFP_KERNEL);
	if (!mem)
		goto err_out_pp;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		goto err_out_priv;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	pp->req_producer = pp->rsp_consumer = 0;

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;

err_out_priv:
	mv_priv_free(pp, dev);
err_out_pp:
	kfree(pp);
err_out:
	return rc;
}
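/* The single MV_PORT_PRIV_DMA_SZ allocation set up above is carved as:
 *   [0KB, 1KB)      32 x 32B CRQBs  (request ring)
 *   [1KB, 1.25KB)   32 x 8B  CRPBs  (response ring)
 *   [1.25KB, 4KB)   176 x 16B ePRDs (scatter/gather table)
 */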
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host_set lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_port_priv *pp = ap->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	ap->private_data = NULL;
	ata_pad_free(ap, dev);
	mv_priv_free(pp, dev);
	kfree(pp);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int i = 0;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr;
		u32 sg_len, len, offset;

		addr = sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & MV_DMA_BOUNDARY;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
			pp->sg_tbl[i].flags_size = cpu_to_le32(len);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				pp->sg_tbl[i].flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			i++;
		}
	}
}

static inline unsigned mv_inc_q_index(unsigned *index)
{
	*index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
	return *index;
}
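/* Example of the 64KB-boundary split in mv_fill_sg(): an SG element at bus
 * address 0x1f000 with length 0x3000 crosses 0x20000, so it is emitted as
 * two ePRDs: (0x1f000, len 0x1000) and (0x20000, len 0x2000).
 */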
static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
{
	*cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
}
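/* For example, packing ATA_CMD_READ_EXT (0x25) as the last register write
 * (assuming ATA_REG_CMD == 0x07) gives
 * 0x25 | (0x07 << 8) | (0x2 << 11) | (1 << 15) = 0x9725.
 */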
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	u16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* the req producer index should be the same as we remember it */
	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		pp->req_producer);

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	pp->crqb[pp->req_producer].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[pp->req_producer].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[pp->req_producer].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	u32 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* the req producer index should be the same as we remember it */
	WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		pp->req_producer);

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* the req producer index should be the same as we remember it */
	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		pp->req_producer);
	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
		  EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
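/* The EDMA IN/OUT pointer registers double as queue base (upper bits) and
 * slot index: a 32B CRQB puts its index at bit 5 (EDMA_REQ_Q_PTR_SHIFT), an
 * 8B CRPB at bit 3 (EDMA_RSP_Q_PTR_SHIFT), so index << shift is simply the
 * slot's byte offset within the ring.
 */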
/**
 *      mv_get_crpb_status - get status from most recently completed cmd
 *      @ap: ATA channel to manipulate
 *
 *      This routine is for use when the port is in DMA mode, when it
 *      will be using the CRPB (command response block) method of
 *      returning command completion information.  We check indices
 *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 out_ptr;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* the response consumer index should be the same as we remember it */
	WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		pp->rsp_consumer);

	/* increment our consumer index... */
	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
		  EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) !=
		pp->rsp_consumer);

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
}
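/* Completion handshake in short: hardware advances the response IN pointer
 * as CRPBs arrive; the driver catches up by bumping its cached consumer
 * index and writing it back to the OUT pointer.  The ATA status byte itself
 * rides in CRPB flags bits 15:8 (CRPB_FLAG_STATUS_SHIFT).
 */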
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		serr = scr_read(ap, SCR_ERROR);
		scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp	= ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (EDMA_ERR_FATAL & edma_err_cause) {
		mv_stop_and_reset(ap);
	}
}
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host_set: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
			 unsigned int hc)
{
	void __iomem *mmio = host_set->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0) {
		port0 = 0;
	} else {
		port0 = MV_PORTS_PER_HC;
	}

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause) {
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc,relevant,hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		ap = host_set->ports[port];
		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
			/* new CRPB on the queue; just one at a time until NCQ
			 */
			ata_status = mv_get_crpb_status(ap);
			handled++;
		} else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
			/* received ATA IRQ; read the status reg to clear INTRQ
			 */
			ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
			handled++;
		}

		if (ap && (ap->flags & ATA_FLAG_PORT_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap);
			err_mask |= AC_ERR_OTHER;
			handled++;
		}

		if (handled && ap) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (NULL != qc) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port,ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
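/* Main-cause bit layout behind the shift logic above: each port owns an
 * err/done bit pair (port N -> bits 2N and 2N+1), and HC1's pairs start
 * past bit 8, so e.g. port 5 uses shift 11 (5*2, plus one to skip bit 8
 * of the HC main IRQ register).
 */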
/**
 *      mv_interrupt -
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *      @regs: unused
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host_set lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host_set->mmio_base;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
	spin_lock(&host_set->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host_set, relevant, hc);
			handled++;
		}
	}
	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(mmio + ofs);
	else
		return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, mmio + ofs);
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 12) | (1 << 7);
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}

static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	void __iomem *mmio = ap->host_set->mmio_base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}

static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}
/**
 *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sstatus = scr_read(ap, SCR_STATUS) & 0x3;
		if ((sstatus == 3) || (sstatus == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (sata_dev_present(ap)) {
		ata_port_probe(ap);
	} else {
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, scr_read(ap, SCR_STATUS));
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_present(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}

static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
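/* The SControl writes above follow the SATA SControl register layout:
 * 0x301 sets DET=1 (drive COMRESET on the wire) with IPM=3 (partial and
 * slumber power states disabled); writing 0x300 releases DET so the PHY
 * can renegotiate the link.
 */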
/**
 *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
 *      @ap: ATA channel to manipulate
 *
 *      Intent is to clear all pending error conditions, reset the
 *      chip/bus, fail the command, and move on.
 *
 *      LOCKING:
 *      This routine holds the host_set lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;

	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
			 to_pci_dev(ap->host_set->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
	       &qc->scsicmd->cmnd);

	mv_err_intr(ap);
	mv_stop_and_reset(ap);

	qc->err_mask |= AC_ERR_TIMEOUT;
	ata_eh_qc_complete(qc);
}
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
		      unsigned int board_idx)
{
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_6042:
	case chip_7042:
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @pdev: host PCI device
 *      @probe_ent: early data struct representing the host
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
			unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	struct mv_host_priv *hpriv = probe_ent->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(pdev, hpriv, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(probe_ent->host_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < probe_ent->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 12);
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < probe_ent->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @probe_ent: early data struct representing the host
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	struct mv_host_priv *hpriv = probe_ent->private_data;
	u8 rev_id, scc;
	const char *scc_s;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
	       "%u slots %u ports %s mode IRQ via %s\n",
	       (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_probe_ent *probe_ent = NULL;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	void __iomem *mmio_base;
	int pci_dev_busy = 0, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc) {
		return rc;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->host_flags = mv_port_info[board_idx].host_flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_init_host(pdev, probe_ent, board_idx);
	if (rc) {
		goto err_out_hpriv;
	}

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev) == 0) {
		hpriv->hp_flags |= MV_HP_FLAG_MSI;
	} else {
		pci_intx(pdev, 1);
	}

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(probe_ent);

	if (ata_device_add(probe_ent) == 0) {
		rc = -ENODEV;		/* No devices discovered */
		goto err_out_dev_add;
	}

	kfree(probe_ent);
	return 0;

err_out_dev_add:
	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy) {
		pci_disable_device(pdev);
	}

	return rc;
}
static int __init mv_init(void)
{
	return pci_module_init(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);