2 * sata_nv.c - NVIDIA nForce SATA
4 * Copyright 2004 NVIDIA Corp. All rights reserved.
5 * Copyright 2004 Andrew Chew
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 * libata documentation is available via 'make {ps|pdf}docs',
24 * as Documentation/DocBook/libata.*
26 * No hardware documentation available outside of NVIDIA.
27 * This driver programs the NVIDIA SATA controller in a similar
28 * fashion as with other PCI IDE BMDMA controllers, with a few
29 * NV-specific details such as register offsets, SATA phy location,
32 * CK804/MCP04 controllers support an alternate programming interface
33 * similar to the ADMA specification (with some modifications).
34 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
35 * sent through the legacy interface.
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pci.h>
42 #include <linux/init.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/interrupt.h>
46 #include <linux/device.h>
47 #include <scsi/scsi_host.h>
48 #include <scsi/scsi_device.h>
49 #include <linux/libata.h>
51 #define DRV_NAME "sata_nv"
52 #define DRV_VERSION "3.5"
54 #define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
63 NV_PORT0_SCR_REG_OFFSET
= 0x00,
64 NV_PORT1_SCR_REG_OFFSET
= 0x40,
66 /* INT_STATUS/ENABLE */
69 NV_INT_STATUS_CK804
= 0x440,
70 NV_INT_ENABLE_CK804
= 0x441,
72 /* INT_STATUS/ENABLE bits */
76 NV_INT_REMOVED
= 0x08,
78 NV_INT_PORT_SHIFT
= 4, /* each port occupies 4 bits */
81 NV_INT_MASK
= NV_INT_DEV
|
82 NV_INT_ADDED
| NV_INT_REMOVED
,
86 NV_INT_CONFIG_METHD
= 0x01, // 0 = INT, 1 = SMI
88 // For PCI config register 20
89 NV_MCP_SATA_CFG_20
= 0x50,
90 NV_MCP_SATA_CFG_20_SATA_SPACE_EN
= 0x04,
91 NV_MCP_SATA_CFG_20_PORT0_EN
= (1 << 17),
92 NV_MCP_SATA_CFG_20_PORT1_EN
= (1 << 16),
93 NV_MCP_SATA_CFG_20_PORT0_PWB_EN
= (1 << 14),
94 NV_MCP_SATA_CFG_20_PORT1_PWB_EN
= (1 << 12),
96 NV_ADMA_MAX_CPBS
= 32,
99 NV_ADMA_SGTBL_LEN
= (1024 - NV_ADMA_CPB_SZ
) /
101 NV_ADMA_SGTBL_TOTAL_LEN
= NV_ADMA_SGTBL_LEN
+ 5,
102 NV_ADMA_SGTBL_SZ
= NV_ADMA_SGTBL_LEN
* NV_ADMA_APRD_SZ
,
103 NV_ADMA_PORT_PRIV_DMA_SZ
= NV_ADMA_MAX_CPBS
*
104 (NV_ADMA_CPB_SZ
+ NV_ADMA_SGTBL_SZ
),
106 /* BAR5 offset to ADMA general registers */
108 NV_ADMA_GEN_CTL
= 0x00,
109 NV_ADMA_NOTIFIER_CLEAR
= 0x30,
111 /* BAR5 offset to ADMA ports */
112 NV_ADMA_PORT
= 0x480,
114 /* size of ADMA port register space */
115 NV_ADMA_PORT_SIZE
= 0x100,
117 /* ADMA port registers */
119 NV_ADMA_CPB_COUNT
= 0x42,
120 NV_ADMA_NEXT_CPB_IDX
= 0x43,
122 NV_ADMA_CPB_BASE_LOW
= 0x48,
123 NV_ADMA_CPB_BASE_HIGH
= 0x4C,
124 NV_ADMA_APPEND
= 0x50,
125 NV_ADMA_NOTIFIER
= 0x68,
126 NV_ADMA_NOTIFIER_ERROR
= 0x6C,
128 /* NV_ADMA_CTL register bits */
129 NV_ADMA_CTL_HOTPLUG_IEN
= (1 << 0),
130 NV_ADMA_CTL_CHANNEL_RESET
= (1 << 5),
131 NV_ADMA_CTL_GO
= (1 << 7),
132 NV_ADMA_CTL_AIEN
= (1 << 8),
133 NV_ADMA_CTL_READ_NON_COHERENT
= (1 << 11),
134 NV_ADMA_CTL_WRITE_NON_COHERENT
= (1 << 12),
136 /* CPB response flag bits */
137 NV_CPB_RESP_DONE
= (1 << 0),
138 NV_CPB_RESP_ATA_ERR
= (1 << 3),
139 NV_CPB_RESP_CMD_ERR
= (1 << 4),
140 NV_CPB_RESP_CPB_ERR
= (1 << 7),
142 /* CPB control flag bits */
143 NV_CPB_CTL_CPB_VALID
= (1 << 0),
144 NV_CPB_CTL_QUEUE
= (1 << 1),
145 NV_CPB_CTL_APRD_VALID
= (1 << 2),
146 NV_CPB_CTL_IEN
= (1 << 3),
147 NV_CPB_CTL_FPDMA
= (1 << 4),
150 NV_APRD_WRITE
= (1 << 1),
151 NV_APRD_END
= (1 << 2),
152 NV_APRD_CONT
= (1 << 3),
154 /* NV_ADMA_STAT flags */
155 NV_ADMA_STAT_TIMEOUT
= (1 << 0),
156 NV_ADMA_STAT_HOTUNPLUG
= (1 << 1),
157 NV_ADMA_STAT_HOTPLUG
= (1 << 2),
158 NV_ADMA_STAT_CPBERR
= (1 << 4),
159 NV_ADMA_STAT_SERROR
= (1 << 5),
160 NV_ADMA_STAT_CMD_COMPLETE
= (1 << 6),
161 NV_ADMA_STAT_IDLE
= (1 << 8),
162 NV_ADMA_STAT_LEGACY
= (1 << 9),
163 NV_ADMA_STAT_STOPPED
= (1 << 10),
164 NV_ADMA_STAT_DONE
= (1 << 12),
165 NV_ADMA_STAT_ERR
= NV_ADMA_STAT_CPBERR
|
166 NV_ADMA_STAT_TIMEOUT
,
169 NV_ADMA_PORT_REGISTER_MODE
= (1 << 0),
170 NV_ADMA_ATAPI_SETUP_COMPLETE
= (1 << 1),
172 /* MCP55 reg offset */
173 NV_CTL_MCP55
= 0x400,
174 NV_INT_STATUS_MCP55
= 0x440,
175 NV_INT_ENABLE_MCP55
= 0x444,
176 NV_NCQ_REG_MCP55
= 0x448,
179 NV_INT_ALL_MCP55
= 0xffff,
180 NV_INT_PORT_SHIFT_MCP55
= 16, /* each port occupies 16 bits */
181 NV_INT_MASK_MCP55
= NV_INT_ALL_MCP55
& 0xfffd,
183 /* SWNCQ ENABLE BITS*/
184 NV_CTL_PRI_SWNCQ
= 0x02,
185 NV_CTL_SEC_SWNCQ
= 0x04,
187 /* SW NCQ status bits*/
188 NV_SWNCQ_IRQ_DEV
= (1 << 0),
189 NV_SWNCQ_IRQ_PM
= (1 << 1),
190 NV_SWNCQ_IRQ_ADDED
= (1 << 2),
191 NV_SWNCQ_IRQ_REMOVED
= (1 << 3),
193 NV_SWNCQ_IRQ_BACKOUT
= (1 << 4),
194 NV_SWNCQ_IRQ_SDBFIS
= (1 << 5),
195 NV_SWNCQ_IRQ_DHREGFIS
= (1 << 6),
196 NV_SWNCQ_IRQ_DMASETUP
= (1 << 7),
198 NV_SWNCQ_IRQ_HOTPLUG
= NV_SWNCQ_IRQ_ADDED
|
199 NV_SWNCQ_IRQ_REMOVED
,
203 /* ADMA Physical Region Descriptor - one SG segment */
/* Bit layout of the 16-bit taskfile-register words stored in a CPB's
 * tf[] array: low byte is the register value, high byte selects the
 * register / carries control flags. */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
222 /* ADMA Command Parameter Block
223 The first 5 SG segments are stored inside the Command Parameter Block itself.
224 If there are more than 5 segments the remainder are stored in a separate
225 memory area indicated by next_aprd. */
227 u8 resp_flags
; /* 0 */
228 u8 reserved1
; /* 1 */
229 u8 ctl_flags
; /* 2 */
230 /* len is length of taskfile in 64 bit words */
233 u8 next_cpb_idx
; /* 5 */
234 __le16 reserved2
; /* 6-7 */
235 __le16 tf
[12]; /* 8-31 */
236 struct nv_adma_prd aprd
[5]; /* 32-111 */
237 __le64 next_aprd
; /* 112-119 */
238 __le64 reserved3
; /* 120-127 */
242 struct nv_adma_port_priv
{
243 struct nv_adma_cpb
*cpb
;
245 struct nv_adma_prd
*aprd
;
247 void __iomem
*ctl_block
;
248 void __iomem
*gen_block
;
249 void __iomem
*notifier_clear_block
;
255 struct nv_host_priv
{
263 unsigned int tag
[ATA_MAX_QUEUE
];
/* Bookkeeping bits recording which FIS types have been observed while
 * analysing a software-NCQ interrupt sequence. */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};
273 struct nv_swncq_port_priv
{
274 struct ata_prd
*prd
; /* our SG list */
275 dma_addr_t prd_dma
; /* and its DMA mapping */
276 void __iomem
*sactive_block
;
277 void __iomem
*irq_block
;
278 void __iomem
*tag_block
;
281 unsigned int last_issue_tag
;
283 /* fifo circular queue to store deferral command */
284 struct defer_queue defer_queue
;
286 /* for NCQ interrupt analysis */
291 unsigned int ncq_flags
;
295 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
297 static int nv_init_one(struct pci_dev
*pdev
, const struct pci_device_id
*ent
);
299 static int nv_pci_device_resume(struct pci_dev
*pdev
);
301 static void nv_ck804_host_stop(struct ata_host
*host
);
302 static irqreturn_t
nv_generic_interrupt(int irq
, void *dev_instance
);
303 static irqreturn_t
nv_nf2_interrupt(int irq
, void *dev_instance
);
304 static irqreturn_t
nv_ck804_interrupt(int irq
, void *dev_instance
);
305 static int nv_scr_read(struct ata_port
*ap
, unsigned int sc_reg
, u32
*val
);
306 static int nv_scr_write(struct ata_port
*ap
, unsigned int sc_reg
, u32 val
);
308 static void nv_nf2_freeze(struct ata_port
*ap
);
309 static void nv_nf2_thaw(struct ata_port
*ap
);
310 static void nv_ck804_freeze(struct ata_port
*ap
);
311 static void nv_ck804_thaw(struct ata_port
*ap
);
312 static void nv_error_handler(struct ata_port
*ap
);
313 static int nv_adma_slave_config(struct scsi_device
*sdev
);
314 static int nv_adma_check_atapi_dma(struct ata_queued_cmd
*qc
);
315 static void nv_adma_qc_prep(struct ata_queued_cmd
*qc
);
316 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd
*qc
);
317 static irqreturn_t
nv_adma_interrupt(int irq
, void *dev_instance
);
318 static void nv_adma_irq_clear(struct ata_port
*ap
);
319 static int nv_adma_port_start(struct ata_port
*ap
);
320 static void nv_adma_port_stop(struct ata_port
*ap
);
322 static int nv_adma_port_suspend(struct ata_port
*ap
, pm_message_t mesg
);
323 static int nv_adma_port_resume(struct ata_port
*ap
);
325 static void nv_adma_freeze(struct ata_port
*ap
);
326 static void nv_adma_thaw(struct ata_port
*ap
);
327 static void nv_adma_error_handler(struct ata_port
*ap
);
328 static void nv_adma_host_stop(struct ata_host
*host
);
329 static void nv_adma_post_internal_cmd(struct ata_queued_cmd
*qc
);
330 static void nv_adma_tf_read(struct ata_port
*ap
, struct ata_taskfile
*tf
);
332 static void nv_mcp55_thaw(struct ata_port
*ap
);
333 static void nv_mcp55_freeze(struct ata_port
*ap
);
334 static void nv_swncq_error_handler(struct ata_port
*ap
);
335 static int nv_swncq_slave_config(struct scsi_device
*sdev
);
336 static int nv_swncq_port_start(struct ata_port
*ap
);
337 static void nv_swncq_qc_prep(struct ata_queued_cmd
*qc
);
338 static void nv_swncq_fill_sg(struct ata_queued_cmd
*qc
);
339 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd
*qc
);
340 static void nv_swncq_irq_clear(struct ata_port
*ap
, u16 fis
);
341 static irqreturn_t
nv_swncq_interrupt(int irq
, void *dev_instance
);
343 static int nv_swncq_port_suspend(struct ata_port
*ap
, pm_message_t mesg
);
344 static int nv_swncq_port_resume(struct ata_port
*ap
);
/* Controller flavours; used as the driver_data index into nv_port_info[]. */
enum nv_host_type {
	GENERIC,
	NFORCE2,		/* NF2 */
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	SWNCQ,
};
357 static const struct pci_device_id nv_pci_tbl
[] = {
358 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA
), NFORCE2
},
359 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA
), NFORCE3
},
360 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2
), NFORCE3
},
361 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA
), CK804
},
362 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2
), CK804
},
363 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA
), CK804
},
364 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2
), CK804
},
365 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA
), SWNCQ
},
366 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2
), SWNCQ
},
367 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA
), SWNCQ
},
368 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2
), SWNCQ
},
369 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA
), GENERIC
},
370 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2
), GENERIC
},
371 { PCI_VDEVICE(NVIDIA
, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3
), GENERIC
},
373 { } /* terminate list */
376 static struct pci_driver nv_pci_driver
= {
378 .id_table
= nv_pci_tbl
,
379 .probe
= nv_init_one
,
381 .suspend
= ata_pci_device_suspend
,
382 .resume
= nv_pci_device_resume
,
384 .remove
= ata_pci_remove_one
,
387 static struct scsi_host_template nv_sht
= {
388 .module
= THIS_MODULE
,
390 .ioctl
= ata_scsi_ioctl
,
391 .queuecommand
= ata_scsi_queuecmd
,
392 .can_queue
= ATA_DEF_QUEUE
,
393 .this_id
= ATA_SHT_THIS_ID
,
394 .sg_tablesize
= LIBATA_MAX_PRD
,
395 .cmd_per_lun
= ATA_SHT_CMD_PER_LUN
,
396 .emulated
= ATA_SHT_EMULATED
,
397 .use_clustering
= ATA_SHT_USE_CLUSTERING
,
398 .proc_name
= DRV_NAME
,
399 .dma_boundary
= ATA_DMA_BOUNDARY
,
400 .slave_configure
= ata_scsi_slave_config
,
401 .slave_destroy
= ata_scsi_slave_destroy
,
402 .bios_param
= ata_std_bios_param
,
405 static struct scsi_host_template nv_adma_sht
= {
406 .module
= THIS_MODULE
,
408 .ioctl
= ata_scsi_ioctl
,
409 .queuecommand
= ata_scsi_queuecmd
,
410 .change_queue_depth
= ata_scsi_change_queue_depth
,
411 .can_queue
= NV_ADMA_MAX_CPBS
,
412 .this_id
= ATA_SHT_THIS_ID
,
413 .sg_tablesize
= NV_ADMA_SGTBL_TOTAL_LEN
,
414 .cmd_per_lun
= ATA_SHT_CMD_PER_LUN
,
415 .emulated
= ATA_SHT_EMULATED
,
416 .use_clustering
= ATA_SHT_USE_CLUSTERING
,
417 .proc_name
= DRV_NAME
,
418 .dma_boundary
= NV_ADMA_DMA_BOUNDARY
,
419 .slave_configure
= nv_adma_slave_config
,
420 .slave_destroy
= ata_scsi_slave_destroy
,
421 .bios_param
= ata_std_bios_param
,
424 static struct scsi_host_template nv_swncq_sht
= {
425 .module
= THIS_MODULE
,
427 .ioctl
= ata_scsi_ioctl
,
428 .queuecommand
= ata_scsi_queuecmd
,
429 .change_queue_depth
= ata_scsi_change_queue_depth
,
430 .can_queue
= ATA_MAX_QUEUE
,
431 .this_id
= ATA_SHT_THIS_ID
,
432 .sg_tablesize
= LIBATA_MAX_PRD
,
433 .cmd_per_lun
= ATA_SHT_CMD_PER_LUN
,
434 .emulated
= ATA_SHT_EMULATED
,
435 .use_clustering
= ATA_SHT_USE_CLUSTERING
,
436 .proc_name
= DRV_NAME
,
437 .dma_boundary
= ATA_DMA_BOUNDARY
,
438 .slave_configure
= nv_swncq_slave_config
,
439 .slave_destroy
= ata_scsi_slave_destroy
,
440 .bios_param
= ata_std_bios_param
,
443 static const struct ata_port_operations nv_generic_ops
= {
444 .tf_load
= ata_tf_load
,
445 .tf_read
= ata_tf_read
,
446 .exec_command
= ata_exec_command
,
447 .check_status
= ata_check_status
,
448 .dev_select
= ata_std_dev_select
,
449 .bmdma_setup
= ata_bmdma_setup
,
450 .bmdma_start
= ata_bmdma_start
,
451 .bmdma_stop
= ata_bmdma_stop
,
452 .bmdma_status
= ata_bmdma_status
,
453 .qc_prep
= ata_qc_prep
,
454 .qc_issue
= ata_qc_issue_prot
,
455 .freeze
= ata_bmdma_freeze
,
456 .thaw
= ata_bmdma_thaw
,
457 .error_handler
= nv_error_handler
,
458 .post_internal_cmd
= ata_bmdma_post_internal_cmd
,
459 .data_xfer
= ata_data_xfer
,
460 .irq_clear
= ata_bmdma_irq_clear
,
461 .irq_on
= ata_irq_on
,
462 .scr_read
= nv_scr_read
,
463 .scr_write
= nv_scr_write
,
464 .port_start
= ata_port_start
,
467 static const struct ata_port_operations nv_nf2_ops
= {
468 .tf_load
= ata_tf_load
,
469 .tf_read
= ata_tf_read
,
470 .exec_command
= ata_exec_command
,
471 .check_status
= ata_check_status
,
472 .dev_select
= ata_std_dev_select
,
473 .bmdma_setup
= ata_bmdma_setup
,
474 .bmdma_start
= ata_bmdma_start
,
475 .bmdma_stop
= ata_bmdma_stop
,
476 .bmdma_status
= ata_bmdma_status
,
477 .qc_prep
= ata_qc_prep
,
478 .qc_issue
= ata_qc_issue_prot
,
479 .freeze
= nv_nf2_freeze
,
481 .error_handler
= nv_error_handler
,
482 .post_internal_cmd
= ata_bmdma_post_internal_cmd
,
483 .data_xfer
= ata_data_xfer
,
484 .irq_clear
= ata_bmdma_irq_clear
,
485 .irq_on
= ata_irq_on
,
486 .scr_read
= nv_scr_read
,
487 .scr_write
= nv_scr_write
,
488 .port_start
= ata_port_start
,
491 static const struct ata_port_operations nv_ck804_ops
= {
492 .tf_load
= ata_tf_load
,
493 .tf_read
= ata_tf_read
,
494 .exec_command
= ata_exec_command
,
495 .check_status
= ata_check_status
,
496 .dev_select
= ata_std_dev_select
,
497 .bmdma_setup
= ata_bmdma_setup
,
498 .bmdma_start
= ata_bmdma_start
,
499 .bmdma_stop
= ata_bmdma_stop
,
500 .bmdma_status
= ata_bmdma_status
,
501 .qc_prep
= ata_qc_prep
,
502 .qc_issue
= ata_qc_issue_prot
,
503 .freeze
= nv_ck804_freeze
,
504 .thaw
= nv_ck804_thaw
,
505 .error_handler
= nv_error_handler
,
506 .post_internal_cmd
= ata_bmdma_post_internal_cmd
,
507 .data_xfer
= ata_data_xfer
,
508 .irq_clear
= ata_bmdma_irq_clear
,
509 .irq_on
= ata_irq_on
,
510 .scr_read
= nv_scr_read
,
511 .scr_write
= nv_scr_write
,
512 .port_start
= ata_port_start
,
513 .host_stop
= nv_ck804_host_stop
,
516 static const struct ata_port_operations nv_adma_ops
= {
517 .tf_load
= ata_tf_load
,
518 .tf_read
= nv_adma_tf_read
,
519 .check_atapi_dma
= nv_adma_check_atapi_dma
,
520 .exec_command
= ata_exec_command
,
521 .check_status
= ata_check_status
,
522 .dev_select
= ata_std_dev_select
,
523 .bmdma_setup
= ata_bmdma_setup
,
524 .bmdma_start
= ata_bmdma_start
,
525 .bmdma_stop
= ata_bmdma_stop
,
526 .bmdma_status
= ata_bmdma_status
,
527 .qc_defer
= ata_std_qc_defer
,
528 .qc_prep
= nv_adma_qc_prep
,
529 .qc_issue
= nv_adma_qc_issue
,
530 .freeze
= nv_adma_freeze
,
531 .thaw
= nv_adma_thaw
,
532 .error_handler
= nv_adma_error_handler
,
533 .post_internal_cmd
= nv_adma_post_internal_cmd
,
534 .data_xfer
= ata_data_xfer
,
535 .irq_clear
= nv_adma_irq_clear
,
536 .irq_on
= ata_irq_on
,
537 .scr_read
= nv_scr_read
,
538 .scr_write
= nv_scr_write
,
539 .port_start
= nv_adma_port_start
,
540 .port_stop
= nv_adma_port_stop
,
542 .port_suspend
= nv_adma_port_suspend
,
543 .port_resume
= nv_adma_port_resume
,
545 .host_stop
= nv_adma_host_stop
,
548 static const struct ata_port_operations nv_swncq_ops
= {
549 .tf_load
= ata_tf_load
,
550 .tf_read
= ata_tf_read
,
551 .exec_command
= ata_exec_command
,
552 .check_status
= ata_check_status
,
553 .dev_select
= ata_std_dev_select
,
554 .bmdma_setup
= ata_bmdma_setup
,
555 .bmdma_start
= ata_bmdma_start
,
556 .bmdma_stop
= ata_bmdma_stop
,
557 .bmdma_status
= ata_bmdma_status
,
558 .qc_defer
= ata_std_qc_defer
,
559 .qc_prep
= nv_swncq_qc_prep
,
560 .qc_issue
= nv_swncq_qc_issue
,
561 .freeze
= nv_mcp55_freeze
,
562 .thaw
= nv_mcp55_thaw
,
563 .error_handler
= nv_swncq_error_handler
,
564 .post_internal_cmd
= ata_bmdma_post_internal_cmd
,
565 .data_xfer
= ata_data_xfer
,
566 .irq_clear
= ata_bmdma_irq_clear
,
567 .irq_on
= ata_irq_on
,
568 .scr_read
= nv_scr_read
,
569 .scr_write
= nv_scr_write
,
571 .port_suspend
= nv_swncq_port_suspend
,
572 .port_resume
= nv_swncq_port_resume
,
574 .port_start
= nv_swncq_port_start
,
577 static const struct ata_port_info nv_port_info
[] = {
581 .flags
= ATA_FLAG_SATA
| ATA_FLAG_NO_LEGACY
,
582 .pio_mask
= NV_PIO_MASK
,
583 .mwdma_mask
= NV_MWDMA_MASK
,
584 .udma_mask
= NV_UDMA_MASK
,
585 .port_ops
= &nv_generic_ops
,
586 .irq_handler
= nv_generic_interrupt
,
591 .flags
= ATA_FLAG_SATA
| ATA_FLAG_NO_LEGACY
,
592 .pio_mask
= NV_PIO_MASK
,
593 .mwdma_mask
= NV_MWDMA_MASK
,
594 .udma_mask
= NV_UDMA_MASK
,
595 .port_ops
= &nv_nf2_ops
,
596 .irq_handler
= nv_nf2_interrupt
,
601 .flags
= ATA_FLAG_SATA
| ATA_FLAG_NO_LEGACY
,
602 .pio_mask
= NV_PIO_MASK
,
603 .mwdma_mask
= NV_MWDMA_MASK
,
604 .udma_mask
= NV_UDMA_MASK
,
605 .port_ops
= &nv_ck804_ops
,
606 .irq_handler
= nv_ck804_interrupt
,
611 .flags
= ATA_FLAG_SATA
| ATA_FLAG_NO_LEGACY
|
612 ATA_FLAG_MMIO
| ATA_FLAG_NCQ
,
613 .pio_mask
= NV_PIO_MASK
,
614 .mwdma_mask
= NV_MWDMA_MASK
,
615 .udma_mask
= NV_UDMA_MASK
,
616 .port_ops
= &nv_adma_ops
,
617 .irq_handler
= nv_adma_interrupt
,
621 .sht
= &nv_swncq_sht
,
622 .flags
= ATA_FLAG_SATA
| ATA_FLAG_NO_LEGACY
|
624 .pio_mask
= NV_PIO_MASK
,
625 .mwdma_mask
= NV_MWDMA_MASK
,
626 .udma_mask
= NV_UDMA_MASK
,
627 .port_ops
= &nv_swncq_ops
,
628 .irq_handler
= nv_swncq_interrupt
,
632 MODULE_AUTHOR("NVIDIA");
633 MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
634 MODULE_LICENSE("GPL");
635 MODULE_DEVICE_TABLE(pci
, nv_pci_tbl
);
636 MODULE_VERSION(DRV_VERSION
);
638 static int adma_enabled
= 1;
639 static int swncq_enabled
;
641 static void nv_adma_register_mode(struct ata_port
*ap
)
643 struct nv_adma_port_priv
*pp
= ap
->private_data
;
644 void __iomem
*mmio
= pp
->ctl_block
;
648 if (pp
->flags
& NV_ADMA_PORT_REGISTER_MODE
)
651 status
= readw(mmio
+ NV_ADMA_STAT
);
652 while (!(status
& NV_ADMA_STAT_IDLE
) && count
< 20) {
654 status
= readw(mmio
+ NV_ADMA_STAT
);
658 ata_port_printk(ap
, KERN_WARNING
,
659 "timeout waiting for ADMA IDLE, stat=0x%hx\n",
662 tmp
= readw(mmio
+ NV_ADMA_CTL
);
663 writew(tmp
& ~NV_ADMA_CTL_GO
, mmio
+ NV_ADMA_CTL
);
666 status
= readw(mmio
+ NV_ADMA_STAT
);
667 while (!(status
& NV_ADMA_STAT_LEGACY
) && count
< 20) {
669 status
= readw(mmio
+ NV_ADMA_STAT
);
673 ata_port_printk(ap
, KERN_WARNING
,
674 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
677 pp
->flags
|= NV_ADMA_PORT_REGISTER_MODE
;
680 static void nv_adma_mode(struct ata_port
*ap
)
682 struct nv_adma_port_priv
*pp
= ap
->private_data
;
683 void __iomem
*mmio
= pp
->ctl_block
;
687 if (!(pp
->flags
& NV_ADMA_PORT_REGISTER_MODE
))
690 WARN_ON(pp
->flags
& NV_ADMA_ATAPI_SETUP_COMPLETE
);
692 tmp
= readw(mmio
+ NV_ADMA_CTL
);
693 writew(tmp
| NV_ADMA_CTL_GO
, mmio
+ NV_ADMA_CTL
);
695 status
= readw(mmio
+ NV_ADMA_STAT
);
696 while (((status
& NV_ADMA_STAT_LEGACY
) ||
697 !(status
& NV_ADMA_STAT_IDLE
)) && count
< 20) {
699 status
= readw(mmio
+ NV_ADMA_STAT
);
703 ata_port_printk(ap
, KERN_WARNING
,
704 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
707 pp
->flags
&= ~NV_ADMA_PORT_REGISTER_MODE
;
710 static int nv_adma_slave_config(struct scsi_device
*sdev
)
712 struct ata_port
*ap
= ata_shost_to_port(sdev
->host
);
713 struct nv_adma_port_priv
*pp
= ap
->private_data
;
714 struct nv_adma_port_priv
*port0
, *port1
;
715 struct scsi_device
*sdev0
, *sdev1
;
716 struct pci_dev
*pdev
= to_pci_dev(ap
->host
->dev
);
717 unsigned long segment_boundary
, flags
;
718 unsigned short sg_tablesize
;
721 u32 current_reg
, new_reg
, config_mask
;
723 rc
= ata_scsi_slave_config(sdev
);
725 if (sdev
->id
>= ATA_MAX_DEVICES
|| sdev
->channel
|| sdev
->lun
)
726 /* Not a proper libata device, ignore */
729 spin_lock_irqsave(ap
->lock
, flags
);
731 if (ap
->link
.device
[sdev
->id
].class == ATA_DEV_ATAPI
) {
733 * NVIDIA reports that ADMA mode does not support ATAPI commands.
734 * Therefore ATAPI commands are sent through the legacy interface.
735 * However, the legacy interface only supports 32-bit DMA.
736 * Restrict DMA parameters as required by the legacy interface
737 * when an ATAPI device is connected.
739 segment_boundary
= ATA_DMA_BOUNDARY
;
740 /* Subtract 1 since an extra entry may be needed for padding, see
742 sg_tablesize
= LIBATA_MAX_PRD
- 1;
744 /* Since the legacy DMA engine is in use, we need to disable ADMA
747 nv_adma_register_mode(ap
);
749 segment_boundary
= NV_ADMA_DMA_BOUNDARY
;
750 sg_tablesize
= NV_ADMA_SGTBL_TOTAL_LEN
;
754 pci_read_config_dword(pdev
, NV_MCP_SATA_CFG_20
, ¤t_reg
);
756 if (ap
->port_no
== 1)
757 config_mask
= NV_MCP_SATA_CFG_20_PORT1_EN
|
758 NV_MCP_SATA_CFG_20_PORT1_PWB_EN
;
760 config_mask
= NV_MCP_SATA_CFG_20_PORT0_EN
|
761 NV_MCP_SATA_CFG_20_PORT0_PWB_EN
;
764 new_reg
= current_reg
| config_mask
;
765 pp
->flags
&= ~NV_ADMA_ATAPI_SETUP_COMPLETE
;
767 new_reg
= current_reg
& ~config_mask
;
768 pp
->flags
|= NV_ADMA_ATAPI_SETUP_COMPLETE
;
771 if (current_reg
!= new_reg
)
772 pci_write_config_dword(pdev
, NV_MCP_SATA_CFG_20
, new_reg
);
774 port0
= ap
->host
->ports
[0]->private_data
;
775 port1
= ap
->host
->ports
[1]->private_data
;
776 sdev0
= ap
->host
->ports
[0]->link
.device
[0].sdev
;
777 sdev1
= ap
->host
->ports
[1]->link
.device
[0].sdev
;
778 if ((port0
->flags
& NV_ADMA_ATAPI_SETUP_COMPLETE
) ||
779 (port1
->flags
& NV_ADMA_ATAPI_SETUP_COMPLETE
)) {
780 /** We have to set the DMA mask to 32-bit if either port is in
781 ATAPI mode, since they are on the same PCI device which is
782 used for DMA mapping. If we set the mask we also need to set
783 the bounce limit on both ports to ensure that the block
784 layer doesn't feed addresses that cause DMA mapping to
785 choke. If either SCSI device is not allocated yet, it's OK
786 since that port will discover its correct setting when it
788 Note: Setting 32-bit mask should not fail. */
790 blk_queue_bounce_limit(sdev0
->request_queue
,
793 blk_queue_bounce_limit(sdev1
->request_queue
,
796 pci_set_dma_mask(pdev
, ATA_DMA_MASK
);
798 /** This shouldn't fail as it was set to this value before */
799 pci_set_dma_mask(pdev
, pp
->adma_dma_mask
);
801 blk_queue_bounce_limit(sdev0
->request_queue
,
804 blk_queue_bounce_limit(sdev1
->request_queue
,
808 blk_queue_segment_boundary(sdev
->request_queue
, segment_boundary
);
809 blk_queue_max_hw_segments(sdev
->request_queue
, sg_tablesize
);
810 ata_port_printk(ap
, KERN_INFO
,
811 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
812 (unsigned long long)*ap
->host
->dev
->dma_mask
,
813 segment_boundary
, sg_tablesize
);
815 spin_unlock_irqrestore(ap
->lock
, flags
);
820 static int nv_adma_check_atapi_dma(struct ata_queued_cmd
*qc
)
822 struct nv_adma_port_priv
*pp
= qc
->ap
->private_data
;
823 return !(pp
->flags
& NV_ADMA_ATAPI_SETUP_COMPLETE
);
/* Read the taskfile after dropping to register mode, since the shadow
 * registers are only valid there. */
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_tf_read(ap, tf);
}
840 static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile
*tf
, __le16
*cpb
)
842 unsigned int idx
= 0;
844 if (tf
->flags
& ATA_TFLAG_ISADDR
) {
845 if (tf
->flags
& ATA_TFLAG_LBA48
) {
846 cpb
[idx
++] = cpu_to_le16((ATA_REG_ERR
<< 8) | tf
->hob_feature
| WNB
);
847 cpb
[idx
++] = cpu_to_le16((ATA_REG_NSECT
<< 8) | tf
->hob_nsect
);
848 cpb
[idx
++] = cpu_to_le16((ATA_REG_LBAL
<< 8) | tf
->hob_lbal
);
849 cpb
[idx
++] = cpu_to_le16((ATA_REG_LBAM
<< 8) | tf
->hob_lbam
);
850 cpb
[idx
++] = cpu_to_le16((ATA_REG_LBAH
<< 8) | tf
->hob_lbah
);
851 cpb
[idx
++] = cpu_to_le16((ATA_REG_ERR
<< 8) | tf
->feature
);
853 cpb
[idx
++] = cpu_to_le16((ATA_REG_ERR
<< 8) | tf
->feature
| WNB
);
855 cpb
[idx
++] = cpu_to_le16((ATA_REG_NSECT
<< 8) | tf
->nsect
);
856 cpb
[idx
++] = cpu_to_le16((ATA_REG_LBAL
<< 8) | tf
->lbal
);
857 cpb
[idx
++] = cpu_to_le16((ATA_REG_LBAM
<< 8) | tf
->lbam
);
858 cpb
[idx
++] = cpu_to_le16((ATA_REG_LBAH
<< 8) | tf
->lbah
);
861 if (tf
->flags
& ATA_TFLAG_DEVICE
)
862 cpb
[idx
++] = cpu_to_le16((ATA_REG_DEVICE
<< 8) | tf
->device
);
864 cpb
[idx
++] = cpu_to_le16((ATA_REG_CMD
<< 8) | tf
->command
| CMDEND
);
867 cpb
[idx
++] = cpu_to_le16(IGN
);
872 static int nv_adma_check_cpb(struct ata_port
*ap
, int cpb_num
, int force_err
)
874 struct nv_adma_port_priv
*pp
= ap
->private_data
;
875 u8 flags
= pp
->cpb
[cpb_num
].resp_flags
;
877 VPRINTK("CPB %d, flags=0x%x\n", cpb_num
, flags
);
879 if (unlikely((force_err
||
880 flags
& (NV_CPB_RESP_ATA_ERR
|
881 NV_CPB_RESP_CMD_ERR
|
882 NV_CPB_RESP_CPB_ERR
)))) {
883 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
886 ata_ehi_clear_desc(ehi
);
887 __ata_ehi_push_desc(ehi
, "CPB resp_flags 0x%x: ", flags
);
888 if (flags
& NV_CPB_RESP_ATA_ERR
) {
889 ata_ehi_push_desc(ehi
, "ATA error");
890 ehi
->err_mask
|= AC_ERR_DEV
;
891 } else if (flags
& NV_CPB_RESP_CMD_ERR
) {
892 ata_ehi_push_desc(ehi
, "CMD error");
893 ehi
->err_mask
|= AC_ERR_DEV
;
894 } else if (flags
& NV_CPB_RESP_CPB_ERR
) {
895 ata_ehi_push_desc(ehi
, "CPB error");
896 ehi
->err_mask
|= AC_ERR_SYSTEM
;
899 /* notifier error, but no error in CPB flags? */
900 ata_ehi_push_desc(ehi
, "unknown");
901 ehi
->err_mask
|= AC_ERR_OTHER
;
904 /* Kill all commands. EH will determine what actually failed. */
912 if (likely(flags
& NV_CPB_RESP_DONE
)) {
913 struct ata_queued_cmd
*qc
= ata_qc_from_tag(ap
, cpb_num
);
914 VPRINTK("CPB flags done, flags=0x%x\n", flags
);
916 DPRINTK("Completing qc from tag %d\n", cpb_num
);
919 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
920 /* Notifier bits set without a command may indicate the drive
921 is misbehaving. Raise host state machine violation on this
923 ata_port_printk(ap
, KERN_ERR
,
924 "notifier for tag %d with no cmd?\n",
926 ehi
->err_mask
|= AC_ERR_HSM
;
927 ehi
->action
|= ATA_EH_RESET
;
935 static int nv_host_intr(struct ata_port
*ap
, u8 irq_stat
)
937 struct ata_queued_cmd
*qc
= ata_qc_from_tag(ap
, ap
->link
.active_tag
);
939 /* freeze if hotplugged */
940 if (unlikely(irq_stat
& (NV_INT_ADDED
| NV_INT_REMOVED
))) {
945 /* bail out if not our interrupt */
946 if (!(irq_stat
& NV_INT_DEV
))
949 /* DEV interrupt w/ no active qc? */
950 if (unlikely(!qc
|| (qc
->tf
.flags
& ATA_TFLAG_POLLING
))) {
951 ata_check_status(ap
);
955 /* handle interrupt */
956 return ata_host_intr(ap
, qc
);
959 static irqreturn_t
nv_adma_interrupt(int irq
, void *dev_instance
)
961 struct ata_host
*host
= dev_instance
;
963 u32 notifier_clears
[2];
965 spin_lock(&host
->lock
);
967 for (i
= 0; i
< host
->n_ports
; i
++) {
968 struct ata_port
*ap
= host
->ports
[i
];
969 notifier_clears
[i
] = 0;
971 if (ap
&& !(ap
->flags
& ATA_FLAG_DISABLED
)) {
972 struct nv_adma_port_priv
*pp
= ap
->private_data
;
973 void __iomem
*mmio
= pp
->ctl_block
;
976 u32 notifier
, notifier_error
;
978 /* if ADMA is disabled, use standard ata interrupt handler */
979 if (pp
->flags
& NV_ADMA_ATAPI_SETUP_COMPLETE
) {
980 u8 irq_stat
= readb(host
->iomap
[NV_MMIO_BAR
] + NV_INT_STATUS_CK804
)
981 >> (NV_INT_PORT_SHIFT
* i
);
982 handled
+= nv_host_intr(ap
, irq_stat
);
986 /* if in ATA register mode, check for standard interrupts */
987 if (pp
->flags
& NV_ADMA_PORT_REGISTER_MODE
) {
988 u8 irq_stat
= readb(host
->iomap
[NV_MMIO_BAR
] + NV_INT_STATUS_CK804
)
989 >> (NV_INT_PORT_SHIFT
* i
);
990 if (ata_tag_valid(ap
->link
.active_tag
))
991 /** NV_INT_DEV indication seems unreliable at times
992 at least in ADMA mode. Force it on always when a
993 command is active, to prevent losing interrupts. */
994 irq_stat
|= NV_INT_DEV
;
995 handled
+= nv_host_intr(ap
, irq_stat
);
998 notifier
= readl(mmio
+ NV_ADMA_NOTIFIER
);
999 notifier_error
= readl(mmio
+ NV_ADMA_NOTIFIER_ERROR
);
1000 notifier_clears
[i
] = notifier
| notifier_error
;
1002 gen_ctl
= readl(pp
->gen_block
+ NV_ADMA_GEN_CTL
);
1004 if (!NV_ADMA_CHECK_INTR(gen_ctl
, ap
->port_no
) && !notifier
&&
1009 status
= readw(mmio
+ NV_ADMA_STAT
);
1011 /* Clear status. Ensure the controller sees the clearing before we start
1012 looking at any of the CPB statuses, so that any CPB completions after
1013 this point in the handler will raise another interrupt. */
1014 writew(status
, mmio
+ NV_ADMA_STAT
);
1015 readw(mmio
+ NV_ADMA_STAT
); /* flush posted write */
1018 handled
++; /* irq handled if we got here */
1020 /* freeze if hotplugged or controller error */
1021 if (unlikely(status
& (NV_ADMA_STAT_HOTPLUG
|
1022 NV_ADMA_STAT_HOTUNPLUG
|
1023 NV_ADMA_STAT_TIMEOUT
|
1024 NV_ADMA_STAT_SERROR
))) {
1025 struct ata_eh_info
*ehi
= &ap
->link
.eh_info
;
1027 ata_ehi_clear_desc(ehi
);
1028 __ata_ehi_push_desc(ehi
, "ADMA status 0x%08x: ", status
);
1029 if (status
& NV_ADMA_STAT_TIMEOUT
) {
1030 ehi
->err_mask
|= AC_ERR_SYSTEM
;
1031 ata_ehi_push_desc(ehi
, "timeout");
1032 } else if (status
& NV_ADMA_STAT_HOTPLUG
) {
1033 ata_ehi_hotplugged(ehi
);
1034 ata_ehi_push_desc(ehi
, "hotplug");
1035 } else if (status
& NV_ADMA_STAT_HOTUNPLUG
) {
1036 ata_ehi_hotplugged(ehi
);
1037 ata_ehi_push_desc(ehi
, "hot unplug");
1038 } else if (status
& NV_ADMA_STAT_SERROR
) {
1039 /* let libata analyze SError and figure out the cause */
1040 ata_ehi_push_desc(ehi
, "SError");
1042 ata_ehi_push_desc(ehi
, "unknown");
1043 ata_port_freeze(ap
);
1047 if (status
& (NV_ADMA_STAT_DONE
|
1048 NV_ADMA_STAT_CPBERR
|
1049 NV_ADMA_STAT_CMD_COMPLETE
)) {
1050 u32 check_commands
= notifier_clears
[i
];
1053 if (status
& NV_ADMA_STAT_CPBERR
) {
1054 /* Check all active commands */
1055 if (ata_tag_valid(ap
->link
.active_tag
))
1056 check_commands
= 1 <<
1057 ap
->link
.active_tag
;
1059 check_commands
= ap
->
1063 /** Check CPBs for completed commands */
1064 while ((pos
= ffs(check_commands
)) && !error
) {
1066 error
= nv_adma_check_cpb(ap
, pos
,
1067 notifier_error
& (1 << pos
));
1068 check_commands
&= ~(1 << pos
);
1074 if (notifier_clears
[0] || notifier_clears
[1]) {
1075 /* Note: Both notifier clear registers must be written
1076 if either is set, even if one is zero, according to NVIDIA. */
1077 struct nv_adma_port_priv
*pp
= host
->ports
[0]->private_data
;
1078 writel(notifier_clears
[0], pp
->notifier_clear_block
);
1079 pp
= host
->ports
[1]->private_data
;
1080 writel(notifier_clears
[1], pp
->notifier_clear_block
);
1083 spin_unlock(&host
->lock
);
1085 return IRQ_RETVAL(handled
);
/* Freeze the port in ADMA mode: mask CK804 legacy interrupts, then (unless
 * the port is in ATAPI/legacy setup) mask the ADMA engine's own interrupts. */
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	/* in ATAPI setup the port runs purely through the legacy interface */
	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
/* Thaw the port in ADMA mode: re-enable ADMA auto and hotplug interrupts
 * (skipped for ATAPI/legacy-mode ports, which never had them enabled). */
static void nv_adma_thaw(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* Enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
/* Clear all pending interrupt state for an ADMA port: CK804 notifications,
 * ADMA status, and both ports' notifier registers (hardware requires both). */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	/* legacy/ATAPI mode ports use the plain BMDMA clear path */
	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}
/* Post-internal-command hook: only needs the BMDMA cleanup when the port is
 * currently in register (legacy) mode; in ADMA mode there is nothing to do. */
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
		ata_bmdma_post_internal_cmd(qc);
}
/* Per-port init for ADMA: allocate private state and the coherent CPB/APRD
 * DMA block, program its base address, and bring the channel out of reset
 * in register mode with interrupts enabled.
 * Returns 0 on success or a negative errno. */
static int nv_adma_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct nv_adma_port_priv *pp;
	int rc;
	void *mem;
	dma_addr_t mem_dma;
	void __iomem *mmio;
	struct pci_dev *pdev = to_pci_dev(dev);
	u16 tmp;

	/* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
	   pad buffers */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;
	rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	/* core libata allocation (PRD table etc.) */
	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
	       ap->port_no * NV_ADMA_PORT_SIZE;
	pp->ctl_block = mmio;
	pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
	pp->notifier_clear_block = pp->gen_block +
	       NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

	/* Now that the legacy PRD and padding buffer are allocated we can
	   safely raise the DMA mask to allocate the CPB/APRD table.
	   These are allowed to fail since we store the value that ends up
	   being used to set as the bounce limit in slave_config later if
	   needed. */
	pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	pp->adma_dma_mask = *dev->dma_mask;

	mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
				  &mem_dma, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory:
	 * 128-byte command parameter block (CPB)
	 * one for each command tag
	 */
	pp->cpb     = mem;
	pp->cpb_dma = mem_dma;

	/* tell the controller where the CPB table lives (64-bit split) */
	writel(mem_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((mem_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
	mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

	/*
	 * Second item: block of ADMA_SGTBL_LEN s/g entries
	 */
	pp->aprd = mem;
	pp->aprd_dma = mem_dma;

	ap->private_data = pp;

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags = NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse channel reset to put the engine in a known state */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
/* Per-port teardown: zero the ADMA control register, stopping the engine
 * and disabling its interrupts. Memory is devm-managed, so no frees here. */
static void nv_adma_port_stop(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	writew(0, mmio + NV_ADMA_CTL);
}
/* Suspend an ADMA port: drop back to register mode, clear the CPB fetch
 * count, then shut the engine down. Always returns 0. */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
/* Resume an ADMA port: reprogram the CPB base (lost across suspend) and
 * repeat the same bring-up sequence as port_start. Always returns 0. */
static int nv_adma_port_resume(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	/* set CPB block location */
	writel(pp->cpb_dma & 0xFFFFFFFF,	mmio + NV_ADMA_CPB_BASE_LOW);
	writel((pp->cpb_dma >> 16) >> 16,	mmio + NV_ADMA_CPB_BASE_HIGH);

	/* clear any outstanding interrupt conditions */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* initialize port variables */
	pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* clear GO for register mode, enable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
		NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

	/* pulse channel reset to re-initialize the engine */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	udelay(1);
	writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */

	return 0;
}
/* Point the port's taskfile register addresses at the ADMA register window:
 * shadow taskfile registers live at 4-byte strides inside the per-port
 * ADMA block, with the control/altstatus register at offset 0x20. */
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
/* Host-wide ADMA init: enable ADMA (and posted-write bursts) on both ports
 * via PCI config space, then remap each port's taskfile addresses.
 * Always returns 0. */
static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}
/* Fill one ADMA PRD entry from a scatterlist element. @idx is the element's
 * position in the s/g list; the last element gets NV_APRD_END, intermediate
 * ones NV_APRD_CONT (entry 4 is the boundary to the external APRD table). */
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;

	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}
/* Build the APRD scatter/gather table for a command: the first 5 entries go
 * inline in the CPB, any further entries spill into the per-tag external
 * APRD table, which the CPB then points at via next_aprd. */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
/* Decide whether a command must go through the legacy register interface
 * (returns nonzero) instead of the ADMA engine (returns 0). */
static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;

	/* ADMA engine can only be used for non-ATAPI DMA commands,
	   or interrupt-driven no-data commands. */
	if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	   (qc->tf.flags & ATA_TFLAG_POLLING))
		return 1;

	if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
	   (qc->tf.protocol == ATA_PROT_NODATA))
		return 0;

	return 1;
}
/* Prepare a command: legacy-path commands fall through to ata_qc_prep();
 * ADMA commands get a CPB built for their tag. Memory barriers enforce the
 * order the controller observes: the CPB is only marked valid after the
 * whole contents (taskfile, flags, APRDs) are in place. */
static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_qc_prep(qc);
		return;
	}

	/* invalidate the CPB before rewriting it */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;
}
/* Issue a command: legacy-path commands go through ata_qc_issue_prot();
 * ADMA commands are kicked off by writing the tag to the APPEND register.
 * NCQ with RESULT_TF is rejected because taskfile readback would force a
 * mode switch that aborts in-flight commands. */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_printk(qc->dev, KERN_ERR,
			"NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_qc_issue_prot(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
/* Generic (nForce2-style) interrupt handler: poll every enabled port and
 * hand any active non-polling command to ata_host_intr(); otherwise read
 * the status register to ack a possible stray interrupt. */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
				handled += ata_host_intr(ap, qc);
			else
				// No request pending?  Clear interrupt status
				// anyway, in case there's one pending.
				ap->ops->check_status(ap);
		}

	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
/* Shared per-port dispatch for NF2/CK804 interrupts: each port's bits sit in
 * successive NV_INT_PORT_SHIFT-wide fields of @irq_stat. Caller holds
 * host->lock. */
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
	int i, handled = 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED))
			handled += nv_host_intr(ap, irq_stat);

		/* shift down to the next port's interrupt bits */
		irq_stat >>= NV_INT_PORT_SHIFT;
	}

	return IRQ_RETVAL(handled);
}
/* nForce2/3 interrupt handler: status byte lives in BAR5 I/O space next to
 * the SCR registers. */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
/* CK804/MCP04 interrupt handler: status byte lives in the MMIO BAR. */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
/* Read a SATA SCR register (4-byte stride). Returns 0 on success,
 * -EINVAL for registers beyond SCR_CONTROL. */
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	*val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}
/* Write a SATA SCR register (4-byte stride). Returns 0 on success,
 * -EINVAL for registers beyond SCR_CONTROL. */
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	if (sc_reg > SCR_CONTROL)
		return -EINVAL;

	iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
	return 0;
}
/* Freeze an nForce2/3 port by masking its bits in the interrupt-enable
 * register (accessed via port 0's SCR window). */
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
/* Thaw an nForce2/3 port: ack any pending status bits, then unmask the
 * port's interrupt-enable bits. */
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
/* Freeze a CK804/MCP04 port by masking its bits in the MMIO interrupt-enable
 * register. */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
/* Thaw a CK804/MCP04 port: ack any pending status bits, then unmask the
 * port's interrupt-enable bits. */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
/* Freeze an MCP55 port: ack pending status, mask the port's 32-bit
 * interrupt-enable field, then apply the generic BMDMA freeze. */
static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_bmdma_freeze(ap);
}
/* Thaw an MCP55 port: ack pending status, unmask the port's interrupt
 * bits, then apply the generic BMDMA thaw. */
static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
	ata_bmdma_thaw(ap);
}
/* Hardreset that deliberately discards the device class result. */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	unsigned int dummy;

	/* SATA hardreset fails to retrieve proper device signature on
	 * some controllers.  Don't classify on hardreset.  For more
	 * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
	 */
	return sata_std_hardreset(link, &dummy, deadline);
}
/* Default error handler: standard BMDMA EH with the non-classifying
 * nv_hardreset substituted in. */
static void nv_error_handler(struct ata_port *ap)
{
	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
/* ADMA error handler: if the engine is in ADMA mode, dump diagnostic state
 * for active commands, drop back to register mode, invalidate all CPBs and
 * reset the channel, then run the standard BMDMA EH. */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_printk(ap, KERN_ERR,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			/* log every CPB that belongs to an active command */
			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_printk(ap, KERN_ERR,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
/* Append a queued command's tag to the software-NCQ defer queue (a fixed
 * ring of ATA_MAX_QUEUE entries indexed by free-running head/tail). */
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
}
/* Pop the oldest deferred command from the defer queue; returns NULL when
 * the queue is empty. The consumed slot is poisoned to catch reuse bugs. */
static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}
/* Reset the per-port FIS bookkeeping bitmaps and flags used by the SWNCQ
 * state machine. */
static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}
/* Fully reinitialize SWNCQ per-port state: empty the defer queue, clear the
 * active-command mask, poison the last-issued tag and reset FIS tracking. */
static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}
/* Acknowledge the given FIS interrupt bits in the port's MCP55 status
 * register. */
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}
/* Stop BMDMA without a real qc in hand: fake up a minimal ata_queued_cmd
 * carrying only the port pointer, which is all ata_bmdma_stop() reads. */
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
/* Dump SWNCQ state for error handling and quiesce the port: log per-tag
 * FIS progress, reset the software state machine, clear interrupts and
 * stop any BMDMA transfer in flight. */
static void nv_swncq_ncq_stop(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	unsigned int i;
	u32 sactive;
	u32 done_mask;

	ata_port_printk(ap, KERN_ERR,
			"EH in SWNCQ mode,QC:qc_active 0x%X sactive 0x%X\n",
			ap->qc_active, ap->link.sactive);
	ata_port_printk(ap, KERN_ERR,
		"SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
		"dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
		pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
		pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);

	ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
			ap->ops->check_status(ap),
			ioread8(ap->ioaddr.error_addr));

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sacitve\n");
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		u8 err = 0;
		if (pp->qc_active & (1 << i))
			err = 0;		/* still active */
		else if (done_mask & (1 << i))
			err = 1;		/* completed but inconsistent */
		else
			continue;		/* never issued */

		ata_port_printk(ap, KERN_ERR,
				"tag 0x%x: %01x %01x %01x %01x %s\n", i,
				(pp->dhfis_bits >> i) & 0x1,
				(pp->dmafis_bits >> i) & 0x1,
				(pp->sdbfis_bits >> i) & 0x1,
				(sactive >> i) & 0x1,
				(err ? "error! tag doesn't exit" : " "));
	}

	nv_swncq_pp_reinit(ap);
	ap->ops->irq_clear(ap);
	__ata_bmdma_stop(ap);
	nv_swncq_irq_clear(ap, 0xffff);
}
/* SWNCQ error handler: if NCQ commands are outstanding, dump/stop the NCQ
 * machinery and force a reset, then run the standard BMDMA EH. */
static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
			   nv_hardreset, ata_std_postreset);
}
/* Suspend a SWNCQ port: ack and mask all MCP55 interrupts, then turn off
 * the SWNCQ enable bits for both channels. Always returns 0. */
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
/* Resume a SWNCQ port: ack stale interrupts, re-enable the interrupt mask
 * and the SWNCQ enable bits. Always returns 0. */
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
/* Host-wide SWNCQ init: disable the ECO 398 workaround bit in PCI config,
 * enable SWNCQ on both channels, unmask interrupts and clear stale status. */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable  ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/*  clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
/* SCSI slave_config hook: run the standard libata config, then work around
 * known-bad NCQ behavior by dropping the queue depth to 1 for Maxtor drives
 * on MCP51, and on MCP55 revisions <= a2. Returns the libata config result. */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_printk(dev, KERN_NOTICE,
			"Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
	}

	return rc;
}
/* Per-port init for SWNCQ: allocate private state and a coherent PRD table
 * per tag, and record the MMIO locations of the SActive, IRQ-status and
 * tag registers. Returns 0 on success or a negative errno. */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one PRD table per NCQ tag */
	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;
	memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
/* Prepare a command: non-NCQ goes through the generic path; NCQ commands
 * with mapped data get a per-tag PRD table built. */
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
{
	if (qc->tf.protocol != ATA_PROT_NCQ) {
		ata_qc_prep(qc);
		return;
	}

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	nv_swncq_fill_sg(qc);
}
/* Build the legacy-format PRD table for an NCQ command's tag, splitting any
 * scatterlist element that would cross a 64KB boundary (BMDMA limit), and
 * marking the final entry with ATA_PRD_EOT. */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* clamp each PRD entry to the 64KB segment limit */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
/* Issue one NCQ command to the drive: set its SActive bit, update the
 * software FIS-tracking state, then load and execute the taskfile.
 * Returns 0 (including when @qc is NULL). */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	writel((1 << qc->tag), pp->sactive_block);
	pp->last_issue_tag = qc->tag;
	pp->dhfis_bits &= ~(1 << qc->tag);
	pp->dmafis_bits &= ~(1 << qc->tag);
	pp->qc_active |= (0x1 << qc->tag);

	ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->tag);

	return 0;
}
/* Issue entry point: non-NCQ commands take the normal path; an NCQ command
 * is issued immediately when the port is idle, otherwise deferred. */
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc->tf.protocol != ATA_PROT_NCQ)
		return ata_qc_issue_prot(qc);

	if (!pp->qc_active)
		nv_swncq_issue_atacmd(ap, qc);
	else
		nv_swncq_qc_to_dq(ap, qc);	/* add qc to defer queue */

	return 0;
}
/* Handle a hotplug interrupt: clear SError, record plug/unplug in the EH
 * descriptor, and freeze the port so EH takes over. */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
/* Process a Set Device Bits FIS: complete every tag whose SActive bit the
 * drive has cleared, then decide what to issue next (reissue a command the
 * drive never acknowledged with a D2H FIS, or pull one off the defer queue).
 * Returns the number of completed commands, or a negative errno on a state
 * error (the caller freezes the port via EH in that case). */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	int nr_done = 0;
	u32 done_mask;
	int i;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transfering data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->irq_clear(ap);
	__ata_bmdma_stop(ap);

	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	/* a bit set in sactive that we never issued is a protocol violation */
	if (unlikely(done_mask & sactive)) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
				  "(%08x->%08x)", pp->qc_active, sactive);
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}
	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		if (!(done_mask & (1 << i)))
			continue;

		qc = ata_qc_from_tag(ap, i);
		if (qc) {
			ata_qc_complete(qc);
			pp->qc_active &= ~(1 << i);
			pp->dhfis_bits &= ~(1 << i);
			pp->dmafis_bits &= ~(1 << i);
			pp->sdbfis_bits |= (1 << i);
			nr_done++;
		}
	}

	if (!ap->qc_active) {
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return nr_done;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return nr_done;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller cann't get a device to host register FIS,
		 * The driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return nr_done;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return nr_done;
}
/* Read the tag of the command the controller is currently servicing:
 * the tag register holds it shifted left by 2, 5 bits wide. */
static inline u32 nv_swncq_tag(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	u32 tag;

	tag = readb(pp->tag_block) >> 2;
	return (tag & 0x1f);
}
/* Respond to a DMA Setup FIS: program the BMDMA engine with the PRD table
 * for the tag the controller selected and start the transfer.
 * Returns 1 on success, 0 if the tag maps to no queued command. */
static int nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	__ata_bmdma_stop(ap);
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	if (unlikely(!qc))
		return 0;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	return 1;
}
/* Per-port SWNCQ interrupt dispatch. Decodes the FIS-type bits of the MCP55
 * status word and drives the software NCQ state machine: hotplug events and
 * device errors freeze the port; SDB, D2H-register and DMA-Setup FIS bits
 * advance command completion, reissue, and BMDMA programming. */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;
	int rc = 0;

	ata_stat = ap->ops->check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(ap, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		rc = nv_swncq_sdbfis(ap);
		if (rc < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		rc = nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
/**
 * nv_swncq_interrupt - top-level MCP55 SWNCQ interrupt handler
 * @irq:          irq number (unused beyond the irqreturn contract)
 * @dev_instance: the ata_host registered with request_irq
 *
 * Reads the combined MCP55 interrupt status word and walks the ports,
 * shifting the word right by NV_INT_PORT_SHIFT_MCP55 per port so each
 * iteration sees its own port's bits in the low bits.  Ports with NCQ
 * commands outstanding (link.sactive) take the SWNCQ path; idle ports
 * fall back to the legacy nv_host_intr handling, with stale non-hotplug
 * bits acknowledged first.
 */
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;
	u32 irq_stat;

	spin_lock_irqsave(&host->lock, flags);

	irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
			if (ap->link.sactive) {
				/* NCQ commands in flight: SWNCQ state machine */
				nv_swncq_host_interrupt(ap, (u16)irq_stat);
				handled = 1;
			} else {
				if (irq_stat)	/* reserve Hotplug */
					nv_swncq_irq_clear(ap, 0xfff0);
			}
			handled += nv_host_intr(ap, (u8)irq_stat);
		}
		/* expose the next port's bits in the low word */
		irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
/**
 * nv_init_one - PCI probe for NVIDIA SATA controllers
 * @pdev: PCI device being probed
 * @ent:  matching pci_device_id; driver_data carries the controller type
 *
 * Validates that the device is the SATA (not IDE) function, selects the
 * operating mode (GENERIC/CK804/ADMA/SWNCQ) from the device type and the
 * adma/swncq module parameters, maps the MMIO BAR, wires up SCR access
 * for both ports, enables the extended SATA register space on CK804 and
 * newer parts, runs mode-specific host init, and activates the host.
 *
 * Returns 0 on success or a negative errno; all resources are devm/pcim
 * managed, so error paths need no explicit teardown.
 */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host */
	if (type == CK804 && adma_enabled) {
		dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	}

	if (type == SWNCQ) {
		if (swncq_enabled)
			dev_printk(KERN_NOTICE, &pdev->dev,
				   "Using SWNCQ mode\n");
		else
			/* SWNCQ disabled by module parameter: run legacy */
			type = GENERIC;
	}

	ppi[0] = &nv_port_info[type];
	rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
	if (rc)
		return rc;

	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
				 IRQF_SHARED, ppi[0]->sht);
}
/**
 * nv_pci_device_resume - restore controller state after suspend
 * @pdev: PCI device being resumed
 *
 * After the generic libata PCI resume, re-applies configuration that a
 * full suspend (PM_EVENT_SUSPEND) loses: the extended SATA register
 * space enable on CK804+ parts and, in ADMA mode, the per-port ADMA
 * enable bits, which must stay off for any port that completed ATAPI
 * setup (ATAPI devices run through the legacy interface).
 *
 * Returns 0 on success or the error from ata_pci_device_do_resume().
 */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		if (hpriv->type >= CK804) {
			u8 regval;

			/* re-enable the extended SATA register space */
			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);

			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
/**
 * nv_ck804_host_stop - host teardown for CK804/MCP04 controllers
 * @host: host being stopped
 *
 * Clears the SATA-space enable bit that nv_init_one() set in config
 * register NV_MCP_SATA_CFG_20, returning the part to its power-on
 * register layout.
 */
static void nv_ck804_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable SATA space for CK804 */
	pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
	regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
	pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
}
/**
 * nv_adma_host_stop - host teardown for ADMA-mode controllers
 * @host: host being stopped
 *
 * Turns off the ADMA enable and PWB enable bits for both ports in
 * NV_MCP_SATA_CFG_20, then chains to nv_ck804_host_stop() to also
 * disable the extended SATA register space.
 */
static void nv_adma_host_stop(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u32 tmp32;

	/* disable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
		   NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		   NV_MCP_SATA_CFG_20_PORT1_EN |
		   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	nv_ck804_host_stop(host);
}
2583 static int __init
nv_init(void)
2585 return pci_register_driver(&nv_pci_driver
);
/* Module exit point: detach from the PCI core; per-device teardown
 * happens through the driver's remove/host-stop callbacks.
 */
static void __exit nv_exit(void)
{
	pci_unregister_driver(&nv_pci_driver);
}
module_init(nv_init);
module_exit(nv_exit);
/* Feature switches, read-only in sysfs (0444); evaluated once at probe
 * time in nv_init_one() to pick ADMA or SWNCQ mode.
 */
module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");