/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.6"
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_USE_Q_DEPTH		= ATA_DEF_QUEUE,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary.  Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				   ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				   ATA_FLAG_PIO_POLLING),
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
};
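/* Worked example of the per-port DMA chunk arithmetic above, using the
 * values defined in this file: 32 CRQBs * 32B = 1024B, 32 CRPBs * 8B = 256B,
 * and 176 ePRDs * 16B = 2816B, so MV_PORT_PRIV_DMA_SZ == 4096B -- one 4KB
 * chunk per port, carved up in mv_port_start() below.
 */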
#define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv) IS_50XX(hpriv)
#define IS_GEN_II(hpriv) IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* Our DMA boundary is determined by an ePRD being unable to handle
	 * anything larger than 64KB
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
/* Command ReQuest Block: 32B */
struct mv_crqb {
	u32			sg_addr;
	u32			sg_addr_hi;
	u16			ctrl_flags;
	u16			ata_cmd[11];
};

struct mv_crqb_iie {
	u32			addr;
	u32			addr_hi;
	u32			flags;
	u32			len;
	u32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	u16			id;
	u16			flags;
	u32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	u32			addr;
	u32			flags_size;
	u32			addr_hi;
	u32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned		req_producer;	/* cp of req_in_ptr */
	unsigned		rsp_consumer;	/* cp of rsp_out_ptr */
	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static void mv_host_stop(struct ata_host_set *host_set);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
static struct scsi_host_template mv_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= MV_USE_Q_DEPTH,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,

	.eng_timeout		= mv_eng_timeout,

	.irq_handler		= mv_interrupt,
	.irq_clear		= mv_irq_clear,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
	.host_stop		= mv_host_stop,
};
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.sht		= &mv_sht,
		.host_flags	= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.sht		= &mv_sht,
		.host_flags	= (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				   MV_FLAG_DUAL_HC),
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= 0x7f,	/* udma0-6 */
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5040), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5041), 0, 0, chip_504x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5080), 0, 0, chip_5080},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5081), 0, 0, chip_508x},

	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6040), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6041), 0, 0, chip_604x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6042), 0, 0, chip_6042},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6080), 0, 0, chip_608x},
	{PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x6081), 0, 0, chip_608x},

	{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x0241), 0, 0, chip_604x},

	{}			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */

/*
 * Functions
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
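/* Illustrative address arithmetic for the helpers above: for port 5 on a
 * dual-HC part, hc = 5 >> 2 = 1 and hardport = 5 & 3 = 1, so the port base
 * works out to base + 0x20000 + (1 * 0x10000) + 0x2000 + (1 * 0x2000),
 * i.e. base + 0x34000.
 */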
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
}

static inline int mv_get_hc_count(unsigned long host_flags)
{
	return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with an
 *      assert.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
{
	if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	assert(EDMA_EN & readl(base + EDMA_CMD_OFS));
}

/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with an
 *      assert.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		assert(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg))
			break;
		udelay(100);
	}

	if (EDMA_EN & reg) {
		printk(KERN_ERR "ata%u: Unable to stop eDMA\n", ap->id);
		/* FIXME: Consider doing a reset here to recover */
	}
}
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4-port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, port >> MV_PORT_HC_SHIFT);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		return readl(mv_ap_base(ap) + ofs);
	} else {
		return (u32) ofs;
	}
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs) {
		writelfl(val, mv_ap_base(ap) + ofs);
	}
}
/**
 *      mv_host_stop - Host specific cleanup/stop routine.
 *      @host_set: host data structure
 *
 *      Disable ints, cleanup host memory, call general purpose
 *      host_stop.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_stop(struct ata_host_set *host_set)
{
	struct mv_host_priv *hpriv = host_set->private_data;
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
	kfree(hpriv);
	ata_host_stop(host_set);
}
static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
{
	/* The per-port DMA chunk is allocated as a single block starting at
	 * the CRQB table (see mv_port_start), so it must be freed with that
	 * cpu address/DMA handle pair, not the CRPB's.
	 */
	dma_free_coherent(dev, MV_PORT_PRIV_DMA_SZ, pp->crqb, pp->crqb_dma);
}
static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~0x1f;		/* clear queue depth */
	cfg &= ~EDMA_CFG_NCQ;	/* clear NCQ mode */
	cfg &= ~(1 << 9);	/* disable equeue */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* dis RX PM port mask */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab host q cache */
		cfg |= (1 << 22);	/* enab cutthrough */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc = -ENOMEM;

	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
	if (!pp)
		goto err_out;
	memset(pp, 0, sizeof(*pp));

	mem = dma_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				 GFP_KERNEL);
	if (!mem)
		goto err_out_pp;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		goto err_out_priv;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(hpriv, port_mmio);

	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	pp->req_producer = pp->rsp_consumer = 0;

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;

err_out_priv:
	mv_priv_free(pp, dev);
err_out_pp:
	kfree(pp);
err_out:
	return rc;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host_set lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	struct mv_port_priv *pp = ap->private_data;
	unsigned long flags;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	ap->private_data = NULL;
	ata_pad_free(ap, dev);
	mv_priv_free(pp, dev);
	kfree(pp);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int i = 0;
	struct scatterlist *sg;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr;
		u32 sg_len, len, offset;

		addr = sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & MV_DMA_BOUNDARY;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			pp->sg_tbl[i].addr = cpu_to_le32(addr & 0xffffffff);
			pp->sg_tbl[i].addr_hi = cpu_to_le32((addr >> 16) >> 16);
			pp->sg_tbl[i].flags_size = cpu_to_le32(len);

			sg_len -= len;
			addr += len;

			if (!sg_len && ata_sg_is_last(sg, qc))
				pp->sg_tbl[i].flags_size |=
					cpu_to_le32(EPRD_FLAG_END_OF_TBL);

			i++;
		}
	}
}
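/* Illustrative split, with hypothetical values: a 24KB segment starting at
 * DMA address 0x1f000 crosses a 64KB boundary, so the loop above emits one
 * ePRD of 0x10000 - 0xf000 = 0x1000 bytes at 0x1f000, then one of the
 * remaining 0x5000 bytes at 0x20000; only the final entry of the whole
 * table gets EPRD_FLAG_END_OF_TBL.
 */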
static inline unsigned mv_inc_q_index(unsigned *index)
{
	*index = (*index + 1) & MV_MAX_Q_DEPTH_MASK;
	return *index;
}
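/* Example of the wrap: with MV_MAX_Q_DEPTH == 32 the mask is 31, so an
 * index of 31 advances to (31 + 1) & 31 == 0, keeping both queue indices
 * inside the 32-slot rings without a branch.
 */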
static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last)
{
	*cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
}
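/* Packed layout, for reference: bits 7:0 carry the register value, the
 * shadow register address lands at bits 10:8, bits 12:11 are set to 0x2 by
 * CRQB_CMD_CS, and bit 15 (CRQB_CMD_LAST) marks the final word.  For
 * example, packing the command register (addr 7, hypothetical data 0xc8,
 * last = 1) yields 0x97c8.
 */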
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	u16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* the req producer index should be the same as we remember it */
	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->req_producer);

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	assert(MV_MAX_Q_DEPTH > qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	pp->crqb[pp->req_producer].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[pp->req_producer].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[pp->req_producer].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	u32 flags = 0;

	if (ATA_PROT_DMA != qc->tf.protocol)
		return;

	/* the req producer index should be the same as we remember it */
	assert(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >>
		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->req_producer);

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	assert(MV_MAX_Q_DEPTH > qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(qc->ap);
	struct mv_port_priv *pp = qc->ap->private_data;
	u32 in_ptr;

	if (ATA_PROT_DMA != qc->tf.protocol) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(qc->ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* the req producer index should be the same as we remember it */
	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->req_producer);
	/* until we do queuing, the queue should be empty at this point */
	assert(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >>
		 EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	mv_inc_q_index(&pp->req_producer);	/* now incr producer index */

	mv_start_dma(port_mmio, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
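/* The in-pointer register doubles as queue base and index: bits 31:10 hold
 * the ring's base address (EDMA_REQ_Q_BASE_LO_MASK) and bits 9:5 the slot
 * number.  For example, with the producer at slot 3 the value written above
 * is (base & 0xfffffc00) | (3 << 5).
 */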
/**
 *      mv_get_crpb_status - get status from most recently completed cmd
 *      @ap: ATA channel to manipulate
 *
 *      This routine is for use when the port is in DMA mode, when it
 *      will be using the CRPB (command response block) method of
 *      returning command completion information.  We assert indices
 *      are good, grab status, and bump the response consumer index to
 *      prove that we're up to date.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 out_ptr;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* the response consumer index should be the same as we remember it */
	assert(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->rsp_consumer);

	/* increment our consumer index... */
	pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	assert(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >>
		 EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) ==
	       pp->rsp_consumer);

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return (pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT);
}
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		serr = scr_read(ap, SCR_ERROR);
		scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp	= ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (EDMA_ERR_FATAL & edma_err_cause) {
		mv_stop_and_reset(ap);
	}
}
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host_set: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
			 unsigned int hc)
{
	void __iomem *mmio = host_set->mmio_base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int shift, port, port0, hard_port, handled;
	unsigned int err_mask;
	u8 ata_status = 0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (hc_irq_cause)
		writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		ap = host_set->ports[port];
		hard_port = port & MV_PORT_MASK;	/* range 0-3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
			/* new CRPB on the queue; just one at a time until NCQ
			 */
			ata_status = mv_get_crpb_status(ap);
			handled++;
		} else if ((DEV_IRQ << hard_port) & hc_irq_cause) {
			/* received ATA IRQ; read the status reg to clear INTRQ
			 */
			ata_status = readb((void __iomem *)
					   ap->ioaddr.status_addr);
			handled++;
		}

		if (ap && (ap->flags & ATA_FLAG_PORT_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap);
			err_mask |= AC_ERR_OTHER;
			handled++;
		}

		if (handled && ap) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (NULL != qc) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
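/* Bit layout behind the 'shift' math above: each port owns a DONE/ERR pair
 * in the main cause register, so port N uses bits 2N and 2N+1; HC1's ports
 * additionally skip bit 8 (HC0's coalescing bit).  Port 5, for example,
 * ends up testing PORT0_ERR << 11.
 */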
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *      @regs: unused
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host_set lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
				struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host_set->mmio_base;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat)) {
		return IRQ_NONE;
	}

	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
	spin_lock(&host_set->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host_set, relevant, hc);
			handled++;
		}
	}
	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host_set->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(mmio + ofs);
	else
		return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, mmio + ofs);
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 12) | (1 << 7);
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}

static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	void __iomem *mmio = ap->host_set->mmio_base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}
/**
 *      __mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host_set->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sstatus = scr_read(ap, SCR_STATUS) & 0x3;
		if ((sstatus == 3) || (sstatus == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (sata_dev_present(ap)) {
		ata_port_probe(ap);
	} else {
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, scr_read(ap, SCR_STATUS));
		ata_port_disable(ap);
		return;
	}
	ap->cbl = ATA_CBL_SATA;

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
	tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
	tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
	tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_present(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
/**
 *      mv_eng_timeout - Routine called by libata when SCSI times out I/O
 *      @ap: ATA channel to manipulate
 *
 *      Intent is to clear all pending error conditions, reset the
 *      chip/bus, fail the command, and move on.
 *
 *      LOCKING:
 *      This routine holds the host_set lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;

	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n", ap->id);
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
			 to_pci_dev(ap->host_set->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
	       &qc->scsicmd->cmnd);

	mv_err_intr(ap);
	mv_stop_and_reset(ap);

	if (!qc) {
		printk(KERN_ERR "ata%u: BUG: timeout without command\n",
		       ap->id);
	} else {
		/* hard, dumb reset...can't rely on BMDMA status */
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = 0;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
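/* Shadow block layout implied above: the taskfile registers live at
 * consecutive 32-bit slots starting at port base + SHD_BLK_OFS (0x100), so
 * e.g. the sector count register sits at port base + 0x100 +
 * 4 * ATA_REG_NSECT, and control/altstatus at port base + 0x120.
 */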
static int mv_chip_id(struct pci_dev *pdev, struct mv_host_priv *hpriv,
		      unsigned int board_idx)
{
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;

		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n",
		       board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @pdev: host PCI device
 *      @probe_ent: early data struct representing the host
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
			unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	void __iomem *mmio = probe_ent->mmio_base;
	struct mv_host_priv *hpriv = probe_ent->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(pdev, hpriv, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(probe_ent->host_flags);
	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;

	for (port = 0; port < probe_ent->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < probe_ent->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 12);
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < probe_ent->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&probe_ent->port[port], port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);
	writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @probe_ent: early data struct representing the host
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_probe_ent *probe_ent)
{
	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
	struct mv_host_priv *hpriv = probe_ent->private_data;
	u8 rev_id, scc;
	const char *scc_s;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
	       "%u slots %u ports %s mode IRQ via %s\n",
	       (unsigned)MV_MAX_Q_DEPTH, probe_ent->n_ports,
	       scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	struct ata_probe_ent *probe_ent = NULL;
	struct mv_host_priv *hpriv;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	void __iomem *mmio_base;
	int pci_dev_busy = 0, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pci_enable_device(pdev);
	if (rc) {
		pci_dev_busy = 1;
		goto err_out;
	}

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (probe_ent == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	memset(probe_ent, 0, sizeof(*probe_ent));
	probe_ent->dev = pci_dev_to_dev(pdev);
	INIT_LIST_HEAD(&probe_ent->node);

	mmio_base = pci_iomap(pdev, MV_PRIMARY_BAR, 0);
	if (mmio_base == NULL) {
		rc = -ENOMEM;
		goto err_out_free_ent;
	}

	hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv) {
		rc = -ENOMEM;
		goto err_out_iounmap;
	}
	memset(hpriv, 0, sizeof(*hpriv));

	probe_ent->sht = mv_port_info[board_idx].sht;
	probe_ent->host_flags = mv_port_info[board_idx].host_flags;
	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
	probe_ent->port_ops = mv_port_info[board_idx].port_ops;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->mmio_base = mmio_base;
	probe_ent->private_data = hpriv;

	/* initialize adapter */
	rc = mv_init_host(pdev, probe_ent, board_idx);
	if (rc) {
		goto err_out_hpriv;
	}

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev) == 0) {
		hpriv->hp_flags |= MV_HP_FLAG_MSI;
	} else {
		pci_intx(pdev, 1);
	}

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(probe_ent);

	if (ata_device_add(probe_ent) == 0) {
		rc = -ENODEV;		/* No devices discovered */
		goto err_out_dev_add;
	}

	kfree(probe_ent);
	return 0;

err_out_dev_add:
	if (MV_HP_FLAG_MSI & hpriv->hp_flags) {
		pci_disable_msi(pdev);
	} else {
		pci_intx(pdev, 0);
	}
err_out_hpriv:
	kfree(hpriv);
err_out_iounmap:
	pci_iounmap(pdev, mmio_base);
err_out_free_ent:
	kfree(probe_ent);
err_out_regions:
	pci_release_regions(pdev);
err_out:
	if (!pci_dev_busy) {
		pci_disable_device(pdev);
	}

	return rc;
}
static int __init mv_init(void)
{
	return pci_module_init(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);